diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index ee2646490db5..cfefdb13a695 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,5 @@ blank_issues_enabled: false contact_links: - name: GitHub Discussions - url: https://github.com/foundry-rs/reth/discussions + url: https://github.com/paradigmxyz/reth/discussions about: Please ask and answer questions here to keep the issue tracker clean. - - name: Security - url: mailto:georgios@paradigm.xyz - about: Please report security vulnerabilities here. diff --git a/.github/assets/check_no_std.sh b/.github/assets/check_no_std.sh new file mode 100755 index 000000000000..f19e39ddac90 --- /dev/null +++ b/.github/assets/check_no_std.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +set -eo pipefail + +# TODO +no_std_packages=( +# reth-codecs +# reth-consensus +# reth-db +# reth-errors +# reth-ethereum-forks +# reth-evm +# reth-evm-ethereum +# reth-network-peers +# reth-primitives +# reth-primitives-traits +# reth-revm +) + +for package in "${no_std_packages[@]}"; do + cmd="cargo +stable build -p $package --target riscv32imac-unknown-none-elf --no-default-features" + + if [ -n "$CI" ]; then + echo "::group::$cmd" + else + printf "\n%s:\n %s\n" "$package" "$cmd" + fi + + $cmd + + if [ -n "$CI" ]; then + echo "::endgroup::" + fi +done diff --git a/.github/assets/hive/Dockerfile b/.github/assets/hive/Dockerfile new file mode 100644 index 000000000000..9f75ba6f1cf2 --- /dev/null +++ b/.github/assets/hive/Dockerfile @@ -0,0 +1,8 @@ +FROM ubuntu + +COPY dist/reth /usr/local/bin + +COPY LICENSE-* ./ + +EXPOSE 30303 30303/udp 9001 8545 8546 +ENTRYPOINT ["/usr/local/bin/reth"] \ No newline at end of file diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh new file mode 100755 index 000000000000..45583d549a37 --- /dev/null +++ b/.github/assets/hive/build_simulators.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -eo 
pipefail + +# Create the hive_assets directory +mkdir hive_assets/ + +cd hivetests +go build . + +./hive -client reth # first builds and caches the client + +# Run each hive command in the background for each simulator and wait +echo "Building images" +./hive -client reth --sim "pyspec" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & +./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & +./hive -client reth --sim "smoke/genesis" -sim.timelimit 1s || true & +./hive -client reth --sim "smoke/network" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/sync" -sim.timelimit 1s || true & +wait + +# Run docker save in parallel and wait +echo "Saving images" +docker save hive/hiveproxy:latest -o ../hive_assets/hiveproxy.tar & +docker save hive/simulators/devp2p:latest -o ../hive_assets/devp2p.tar & +docker save hive/simulators/ethereum/engine:latest -o ../hive_assets/engine.tar & +docker save hive/simulators/ethereum/rpc-compat:latest -o ../hive_assets/rpc_compat.tar & +docker save hive/simulators/ethereum/pyspec:latest -o ../hive_assets/pyspec.tar & +docker save hive/simulators/smoke/genesis:latest -o ../hive_assets/smoke_genesis.tar & +docker save hive/simulators/smoke/network:latest -o ../hive_assets/smoke_network.tar & +docker save hive/simulators/ethereum/sync:latest -o ../hive_assets/ethereum_sync.tar & +wait + +# Make sure we don't rebuild images on the CI jobs +git apply ../.github/assets/hive/no_sim_build.diff +go build . 
+mv ./hive ../hive_assets/ \ No newline at end of file diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml new file mode 100644 index 000000000000..ba29ca5e7097 --- /dev/null +++ b/.github/assets/hive/expected_failures.yaml @@ -0,0 +1,120 @@ +# https://github.com/paradigmxyz/reth/issues/7015 +# https://github.com/paradigmxyz/reth/issues/6332 +rpc-compat: + - debug_getRawBlock/get-invalid-number (reth) + - debug_getRawHeader/get-invalid-number (reth) + - debug_getRawReceipts/get-invalid-number (reth) + - debug_getRawTransaction/get-invalid-hash (reth) + + - eth_call/call-callenv (reth) + - eth_createAccessList/create-al-contract-eip1559 (reth) + - eth_createAccessList/create-al-contract (reth) + - eth_feeHistory/fee-history (reth) + - eth_getStorageAt/get-storage-invalid-key-too-large (reth) + - eth_getStorageAt/get-storage-invalid-key (reth) + - eth_getTransactionReceipt/get-access-list (reth) + - eth_getTransactionReceipt/get-blob-tx (reth) + - eth_getTransactionReceipt/get-dynamic-fee (reth) + +# https://github.com/paradigmxyz/reth/issues/8732 +engine-withdrawals: + - Withdrawals Fork On Genesis (Paris) (reth) + - Withdrawals Fork on Block 1 (Paris) (reth) + - Withdrawals Fork on Block 2 (Paris) (reth) + - Withdrawals Fork on Block 3 (Paris) (reth) + - Withdraw to a single account (Paris) (reth) + - Withdraw to two accounts (Paris) (reth) + - Withdraw many accounts (Paris) (reth) + - Withdraw zero amount (Paris) (reth) + - Empty Withdrawals (Paris) (reth) + - Corrupted Block Hash Payload (INVALID) (Paris) (reth) + - Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload (Paris) (reth) + - Withdrawals Fork on Block 1 - 8 Block Re-Org, Sync (Paris) (reth) + - Withdrawals Fork on Block 8 - 10 Block Re-Org NewPayload (Paris) (reth) + - Withdrawals Fork on Block 8 - 10 Block Re-Org Sync (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth) + - Withdrawals Fork on 
Canonical Block 8 / Side Block 7 - 10 Block Re-Org Sync (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync (Paris) (reth) + +# https://github.com/paradigmxyz/reth/issues/8305 +# https://github.com/paradigmxyz/reth/issues/6217 +engine-api: + - Inconsistent Head in ForkchoiceState (Paris) (reth) + - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=True, DynFeeTxs=False (Paris) (reth) + - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Paris) (reth) + - Invalid NewPayload, PrevRandao, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasLimit, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Timestamp, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Paris) (reth) + + # Hive issue + # https://github.com/ethereum/hive/issues/1135 + - Invalid Missing Ancestor Syncing ReOrg, Transaction Signature, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Signature, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Nonce, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Nonce, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) 
(reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Gas, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Gas, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction GasPrice, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction GasPrice, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Value, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Value, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, ReceiptsRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, ReceiptsRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasLimit, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasUsed, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasUsed, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Timestamp, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Incomplete Transactions, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Incomplete Transactions, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + +# https://github.com/paradigmxyz/reth/issues/8305 +# https://github.com/paradigmxyz/reth/issues/6217 +# https://github.com/paradigmxyz/reth/issues/8306 +# https://github.com/paradigmxyz/reth/issues/7144 +engine-cancun: + - Blob Transaction Ordering, 
Multiple Clients (Cancun) (reth) + - Inconsistent Head in ForkchoiceState (Cancun) (reth) + - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=True, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, PrevRandao, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasLimit, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Timestamp, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Cancun) (reth) + - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth) + - Invalid NewPayload, ParentBeaconBlockRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, BlobGasUsed, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, Blob Count on BlobGasUsed, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + + # Hive issue + # https://github.com/ethereum/hive/issues/1135 + - Invalid Missing Ancestor Syncing ReOrg, ReceiptsRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, ReceiptsRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasLimit, EmptyTxs=False, CanonicalReOrg=True, 
Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasUsed, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasUsed, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Timestamp, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Incomplete Transactions, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Incomplete Transactions, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Signature, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Signature, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Nonce, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Nonce, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Gas, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Gas, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction GasPrice, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction GasPrice, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Value, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Value, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + +# https://github.com/paradigmxyz/reth/issues/8579 +sync: + - 
sync reth -> reth \ No newline at end of file diff --git a/.github/assets/hive/load_images.sh b/.github/assets/hive/load_images.sh new file mode 100755 index 000000000000..05e1cb9905fa --- /dev/null +++ b/.github/assets/hive/load_images.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -eo pipefail + +# List of tar files to load +IMAGES=( + "/tmp/hiveproxy.tar" + "/tmp/devp2p.tar" + "/tmp/engine.tar" + "/tmp/rpc_compat.tar" + "/tmp/pyspec.tar" + "/tmp/smoke_genesis.tar" + "/tmp/smoke_network.tar" + "/tmp/ethereum_sync.tar" + "/tmp/reth_image.tar" +) + +# Loop through the images and load them +for IMAGE_TAR in "${IMAGES[@]}"; do + echo "Loading image $IMAGE_TAR..." + docker load -i "$IMAGE_TAR" & +done + +wait + +docker image ls -a \ No newline at end of file diff --git a/.github/assets/hive/no_sim_build.diff b/.github/assets/hive/no_sim_build.diff new file mode 100644 index 000000000000..0b109efe7cd0 --- /dev/null +++ b/.github/assets/hive/no_sim_build.diff @@ -0,0 +1,53 @@ +diff --git a/internal/libdocker/builder.go b/internal/libdocker/builder.go +index 4731c9d..d717f52 100644 +--- a/internal/libdocker/builder.go ++++ b/internal/libdocker/builder.go +@@ -7,9 +7,7 @@ import ( + "fmt" + "io" + "io/fs" +- "os" + "path/filepath" +- "strings" + + "github.com/ethereum/hive/internal/libhive" + docker "github.com/fsouza/go-dockerclient" +@@ -53,24 +51,8 @@ func (b *Builder) BuildClientImage(ctx context.Context, client libhive.ClientDes + + // BuildSimulatorImage builds a docker image of a simulator. 
+ func (b *Builder) BuildSimulatorImage(ctx context.Context, name string) (string, error) { +- dir := b.config.Inventory.SimulatorDirectory(name) +- buildContextPath := dir +- buildDockerfile := "Dockerfile" +- // build context dir of simulator can be overridden with "hive_context.txt" file containing the desired build path +- if contextPathBytes, err := os.ReadFile(filepath.Join(filepath.FromSlash(dir), "hive_context.txt")); err == nil { +- buildContextPath = filepath.Join(dir, strings.TrimSpace(string(contextPathBytes))) +- if strings.HasPrefix(buildContextPath, "../") { +- return "", fmt.Errorf("cannot access build directory outside of Hive root: %q", buildContextPath) +- } +- if p, err := filepath.Rel(buildContextPath, filepath.Join(filepath.FromSlash(dir), "Dockerfile")); err != nil { +- return "", fmt.Errorf("failed to derive relative simulator Dockerfile path: %v", err) +- } else { +- buildDockerfile = p +- } +- } + tag := fmt.Sprintf("hive/simulators/%s:latest", name) +- err := b.buildImage(ctx, buildContextPath, buildDockerfile, tag, nil) +- return tag, err ++ return tag, nil + } + + // BuildImage creates a container by archiving the given file system, +diff --git a/internal/libdocker/proxy.go b/internal/libdocker/proxy.go +index a53e5af..0bb2ea9 100644 +--- a/internal/libdocker/proxy.go ++++ b/internal/libdocker/proxy.go +@@ -16,7 +16,7 @@ const hiveproxyTag = "hive/hiveproxy" + + // Build builds the hiveproxy image. + func (cb *ContainerBackend) Build(ctx context.Context, b libhive.Builder) error { +- return b.BuildImage(ctx, hiveproxyTag, hiveproxy.Source) ++ return nil + } + + // ServeAPI starts the API server. 
diff --git a/.github/assets/hive/parse.py b/.github/assets/hive/parse.py new file mode 100644 index 000000000000..ee75fdf55317 --- /dev/null +++ b/.github/assets/hive/parse.py @@ -0,0 +1,43 @@ +import json +import yaml +import sys +import argparse + +# Argument parser setup +parser = argparse.ArgumentParser(description="Check for unexpected test results based on an exclusion list.") +parser.add_argument("report_json", help="Path to the hive report JSON file.") +parser.add_argument("--exclusion", required=True, help="Path to the exclusion YAML file.") +args = parser.parse_args() + +# Load hive JSON +with open(args.report_json, 'r') as file: + report = json.load(file) + +# Load exclusion YAML +with open(args.exclusion, 'r') as file: + exclusion_data = yaml.safe_load(file) + exclusions = exclusion_data.get(report['name'], []) + +# Collect unexpected failures and passes +unexpected_failures = [] +unexpected_passes = [] + +for test in report['testCases'].values(): + test_name = test['name'] + test_pass = test['summaryResult']['pass'] + if test_name in exclusions: + if test_pass: + unexpected_passes.append(test_name) + else: + if not test_pass: + unexpected_failures.append(test_name) + +# Check if there are any unexpected failures or passes and exit with error +if unexpected_failures or unexpected_passes: + if unexpected_failures: + print("Unexpected Failures:", unexpected_failures) + if unexpected_passes: + print("Unexpected Passes:", unexpected_passes) + sys.exit(1) + +print("Success.") \ No newline at end of file diff --git a/.github/assets/hive/run_simulator.sh b/.github/assets/hive/run_simulator.sh new file mode 100755 index 000000000000..018077bdca38 --- /dev/null +++ b/.github/assets/hive/run_simulator.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# set -x + +cd hivetests/ + +sim="${1}" +limit="${2}" + +run_hive() { + hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 4 --client reth 2>&1 | tee /tmp/log || true +} + +check_log() { + tail -n 1 /tmp/log | sed 
-r 's/\x1B\[[0-9;]*[mK]//g' +} + +attempt=0 +max_attempts=5 + +while [ $attempt -lt $max_attempts ]; do + run_hive + + # Check if no tests were run. sed removes ansi colors + if check_log | grep -q "suites=0"; then + echo "no tests were run, retrying in 5 seconds" + sleep 5 + attempt=$((attempt + 1)) + continue + fi + + # Check the last line of the log for "finished", "tests failed", or "test failed" + if check_log | grep -Eq "(finished|tests? failed)"; then + exit 0 + else + exit 1 + fi +done +exit 1 \ No newline at end of file diff --git a/.github/scripts/install_geth.sh b/.github/assets/install_geth.sh similarity index 100% rename from .github/scripts/install_geth.sh rename to .github/assets/install_geth.sh diff --git a/.github/scripts/label_pr.js b/.github/assets/label_pr.js similarity index 100% rename from .github/scripts/label_pr.js rename to .github/assets/label_pr.js diff --git a/.github/workflows/assertoor.yml b/.github/workflows/assertoor.yml index becbf4a3a59b..02931c656348 100644 --- a/.github/workflows/assertoor.yml +++ b/.github/workflows/assertoor.yml @@ -33,7 +33,7 @@ jobs: cat etc/assertoor/assertoor-template.yaml | envsubst > etc/assertoor/assertoor.yaml - kurtosis run github.com/kurtosis-tech/ethereum-package --enclave assertoor-${{ github.run_id }} --args-file etc/assertoor/assertoor.yaml + kurtosis run github.com/ethpandaops/ethereum-package --enclave assertoor-${{ github.run_id }} --args-file etc/assertoor/assertoor.yaml enclave_dump=$(kurtosis enclave inspect assertoor-${{ github.run_id }}) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 9291f7a6cf20..f8d1d475e300 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -49,12 +49,12 @@ jobs: with: ref: ${{ github.base_ref || 'main' }} - name: Generate test vectors - run: cargo run --bin reth -- test-vectors tables + run: cargo run --bin reth --features dev -- test-vectors tables - name: Save baseline - run: cargo bench -p reth-db --bench iai 
--features test-utils -- --save-baseline=$BASELINE + run: cargo bench -p reth-db --bench iai --profile profiling --features test-utils -- --save-baseline=$BASELINE - name: Checkout PR uses: actions/checkout@v4 with: clean: false - name: Compare PR benchmarks - run: cargo bench -p reth-db --bench iai --features test-utils -- --baseline=$BASELINE + run: cargo bench -p reth-db --bench iai --profile profiling --features test-utils -- --baseline=$BASELINE diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 449908f45078..56d5c427466e 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -78,7 +78,7 @@ jobs: run: mdbook build - name: Build docs - run: cargo docs + run: cargo docs --exclude "example-*" env: # Keep in sync with ./ci.yml:jobs.docs RUSTDOCFLAGS: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 20ae6644b909..2af324a39eb7 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -39,14 +39,10 @@ jobs: docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 docker buildx create --use --name cross-builder - name: Build and push reth image, tag as "latest" - if: ${{ contains(github.event.ref, 'beta') }} run: make PROFILE=maxperf docker-build-push-latest - name: Build and push reth image - if: ${{ ! contains(github.event.ref, 'beta') }} run: make PROFILE=maxperf docker-build-push - name: Build and push op-reth image, tag as "latest" - if: ${{ contains(github.event.ref, 'beta') }} run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest - name: Build and push op-reth image - if: ${{ ! 
contains(github.event.ref, 'beta') }} run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push diff --git a/.github/workflows/eth-sync.yml b/.github/workflows/eth-sync.yml new file mode 100644 index 000000000000..54c0d96079b9 --- /dev/null +++ b/.github/workflows/eth-sync.yml @@ -0,0 +1,50 @@ +# Runs an ethereum mainnet sync test. + +name: eth-sync-test + +on: + pull_request: + merge_group: + push: + branches: [main] + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + sync: + name: sync / 100k blocks + # Only run sync tests in merge groups + if: github.event_name == 'merge_group' + runs-on: + group: Reth + env: + RUST_LOG: info,sync=error + RUST_BACKTRACE: 1 + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build reth + run: | + cargo install --features asm-keccak,jemalloc --path bin/reth + - name: Run sync + run: | + reth node \ + --debug.tip 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 \ + --debug.max-block 100000 \ + --debug.terminate + - name: Verify the target block hash + run: | + reth db get static-file headers 100000 \ + | grep 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 + - name: Run stage unwind for 100 blocks + run: | + reth stage unwind num-blocks 100 diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 6151c9569df1..65063dd018d4 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -16,28 +16,47 @@ concurrency: cancel-in-progress: true jobs: - prepare: + prepare-reth: if: github.repository == 'paradigmxyz/reth' timeout-minutes: 45 runs-on: group: Reth steps: - uses: actions/checkout@v4 - - run: mkdir artifacts + - run: mkdir artifacts + - uses: dtolnay/rust-toolchain@stable + - 
uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build reth + run: | + cargo build --features asm-keccak --profile hivetests --bin reth --locked + mkdir dist && cp ./target/hivetests/reth ./dist/reth - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Build and export reth image - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . + file: .github/assets/hive/Dockerfile tags: ghcr.io/paradigmxyz/reth:latest - build-args: | - BUILD_PROFILE=hivetests - FEATURES=asm-keccak outputs: type=docker,dest=./artifacts/reth_image.tar cache-from: type=gha cache-to: type=gha,mode=max + - name: Upload reth image + uses: actions/upload-artifact@v4 + with: + name: artifacts + path: ./artifacts + + prepare-hive: + if: github.repository == 'paradigmxyz/reth' + timeout-minutes: 45 + runs-on: + group: Reth + steps: + - uses: actions/checkout@v4 - name: Checkout hive tests uses: actions/checkout@v4 with: @@ -49,29 +68,23 @@ jobs: with: go-version: "^1.13.1" - run: go version - - name: Build hive tool - run: | - cd hivetests - go build . 
- mv ./hive ../artifacts/ - - name: Upload artifacts + - name: Build hive assets + run: .github/assets/hive/build_simulators.sh + + - name: Upload hive assets uses: actions/upload-artifact@v4 with: - name: artifacts - path: ./artifacts - + name: hive_assets + path: ./hive_assets test: timeout-minutes: 60 strategy: fail-fast: false matrix: - # TODO: enable etherem/sync once resolved: - # https://github.com/paradigmxyz/reth/issues/8579 - # TODO: enable ethereum/rpc once resolved: + # ethereum/rpc to be deprecated: # https://github.com/ethereum/hive/pull/1117 - # sim: [ethereum/rpc, smoke/genesis, smoke/network, ethereum/sync] - sim: [smoke/genesis, smoke/network] + sim: [smoke/genesis, smoke/network, ethereum/sync] include: - sim: devp2p limit: discv4 @@ -98,25 +111,14 @@ jobs: - TestBlobViolations - sim: ethereum/engine limit: engine-exchange-capabilities - # TODO: enable engine-withdrawals once resolved: - # https://github.com/paradigmxyz/reth/issues/8732 - # - sim: ethereum/engine - # limit: engine-withdrawals + - sim: ethereum/engine + limit: engine-withdrawals - sim: ethereum/engine limit: engine-auth - sim: ethereum/engine - limit: engine-transition - # TODO: enable engine-api once resolved: - # https://github.com/paradigmxyz/reth/issues/6217 - # https://github.com/paradigmxyz/reth/issues/8305 - # - sim: ethereum/engine - # limit: engine-api - # TODO: enable cancun once resolved: - # https://github.com/paradigmxyz/reth/issues/6217 - # https://github.com/paradigmxyz/reth/issues/8306 - # https://github.com/paradigmxyz/reth/issues/7144 - # - sim: ethereum/engine - # limit: cancun + limit: engine-api + - sim: ethereum/engine + limit: cancun # eth_ rpc methods - sim: ethereum/rpc-compat include: @@ -137,12 +139,9 @@ jobs: - eth_getTransactionReceipt - eth_sendRawTransaction - eth_syncing - # TODO: enable debug_ rpc-compat once resolved: - # https://github.com/paradigmxyz/reth/issues/7015 - # https://github.com/paradigmxyz/reth/issues/6332 # debug_ rpc methods - # - 
sim: ethereum/rpc-compat - # include: [debug_] + - sim: ethereum/rpc-compat + include: [debug_] # Pyspec cancun jobs - sim: pyspec include: [cancun/eip4844] @@ -176,23 +175,34 @@ jobs: include: [homestead/] - sim: pyspec include: [frontier/] - needs: prepare + needs: + - prepare-reth + - prepare-hive name: run runs-on: group: Reth permissions: issues: write steps: - - name: Download artifacts + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download hive assets + uses: actions/download-artifact@v4 + with: + name: hive_assets + path: /tmp + + - name: Download reth image uses: actions/download-artifact@v4 with: name: artifacts path: /tmp - - name: Load Docker image - run: | - docker load --input /tmp/reth_image.tar - docker image ls -a + - name: Load Docker images + run: .github/assets/hive/load_images.sh + - name: Move hive binary run: | mv /tmp/hive /usr/local/bin @@ -206,32 +216,12 @@ jobs: path: hivetests - name: Run ${{ matrix.sim }} simulator - run: | - cd hivetests - hive --sim "${{ matrix.sim }}$" --sim.limit "${{matrix.limit}}/${{join(matrix.include, '|')}}" --client reth + run: .github/assets/hive/run_simulator.sh "${{ matrix.sim }}$" "${{matrix.limit}}/${{join(matrix.include, '|')}}" - - name: Create github issue if sim failed - env: - GH_TOKEN: ${{ github.token }} - if: ${{ failure() }} + - name: Parse hive output run: | - echo "Simulator failed, creating issue" - # Check if issue already exists - # get all issues with the label C-hivetest, loop over each page and check if the issue already exists - - existing_issues=$(gh api /repos/paradigmxyz/reth/issues -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" -F "labels=C-hivetest" --method GET | jq '.[].title') - if [[ $existing_issues == *"Hive Test Failure: ${{ matrix.sim }}"* ]]; then - echo "Issue already exists" - exit 0 - fi - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ 
github.repository }}/issues \ - -f title='Hive Test Failure: ${{ matrix.sim }}' \ - -f body="!!!!!!! This is an automated issue created by the hive test failure !!!!!!!

The hive test for ${{ matrix.sim }} failed. Please investigate and fix the issue.

[Link to the failed run](https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }})" \ - -f "labels[]=C-hivetest" + find hivetests/workspace/logs -type f -name "*.json" ! -name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml + - name: Print simulator output if: ${{ failure() }} run: | diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 319896154b19..103a87706bca 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -31,7 +31,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - name: Install Geth - run: .github/scripts/install_geth.sh + run: .github/assets/install_geth.sh - uses: taiki-e/install-action@nextest - uses: Swatinem/rust-cache@v2 with: @@ -41,7 +41,7 @@ jobs: run: | cargo nextest run \ --locked --features "asm-keccak ${{ matrix.network }}" \ - --workspace --exclude examples --exclude ef-tests \ + --workspace --exclude ef-tests \ -E "kind(test)" - if: matrix.network == 'optimism' name: Run tests @@ -49,35 +49,6 @@ jobs: cargo nextest run \ --locked -p reth-node-optimism --features "optimism" - sync: - name: sync / 100k blocks - # Only run sync tests in merge groups - if: github.event_name == 'merge_group' - runs-on: - group: Reth - env: - RUST_LOG: info,sync=error - RUST_BACKTRACE: 1 - timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Run sync - run: | - cargo run --release --features asm-keccak,jemalloc,min-error-logs --bin reth \ - -- node \ - --debug.tip 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 \ - --debug.max-block 100000 \ - --debug.terminate - - name: Verify the target block hash - run: | - cargo run --release --bin reth \ - -- db get static-file headers 100000 \ - | grep 
0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 - integration-success: name: integration success runs-on: ubuntu-latest diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml index 857d354a8fb8..07727173531b 100644 --- a/.github/workflows/label-pr.yml +++ b/.github/workflows/label-pr.yml @@ -19,5 +19,5 @@ jobs: uses: actions/github-script@v7 with: script: | - const label_pr = require('./.github/scripts/label_pr.js') + const label_pr = require('./.github/assets/label_pr.js') await label_pr({github, context}) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 7011b9555bd5..3aefc21c8389 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -45,6 +45,21 @@ jobs: env: RUSTFLAGS: -D warnings + no-std: + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: riscv32imac-unknown-none-elf + - uses: taiki-e/install-action@cargo-hack + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Run no_std checks + run: .github/assets/check_no_std.sh + crate-checks: runs-on: ubuntu-latest timeout-minutes: 30 @@ -72,7 +87,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: - toolchain: "1.76" # MSRV + toolchain: "1.79" # MSRV - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -115,7 +130,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: - toolchain: "1.76" # MSRV + toolchain: "1.79" # MSRV - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -145,11 +160,29 @@ jobs: with: cmd: jq empty etc/grafana/dashboards/overview.json + no-test-deps: + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - name: Ensure no arbitrary or proptest dependency on default build + run: cargo tree --package reth -e=features,no-dev | grep -Eq 
"arbitrary|proptest" && exit 1 || exit 0 + lint-success: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, book, codespell, grafana] + needs: + - clippy-binaries + - clippy + - crate-checks + - docs + - fmt + - book + - codespell + - grafana + - no-test-deps timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/op-sync.yml b/.github/workflows/op-sync.yml new file mode 100644 index 000000000000..73303b032d05 --- /dev/null +++ b/.github/workflows/op-sync.yml @@ -0,0 +1,52 @@ +# Runs a base mainnet sync test. + +name: op-sync-test + +on: + pull_request: + merge_group: + push: + branches: [main] + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + sync: + name: op sync / 10k blocks + # Only run sync tests in merge groups + if: github.event_name == 'merge_group' + runs-on: + group: Reth + env: + RUST_LOG: info,sync=error + RUST_BACKTRACE: 1 + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build op-reth + run: | + cargo install --features asm-keccak,jemalloc,optimism --bin op-reth --path bin/reth + - name: Run sync + # https://basescan.org/block/10000 + run: | + op-reth node \ + --chain base \ + --debug.tip 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 \ + --debug.max-block 10000 \ + --debug.terminate + - name: Verify the target block hash + run: | + op-reth db --chain base get static-file headers 10000 \ + | grep 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 + - name: Run stage unwind for 100 blocks + run: | + op-reth stage --chain base unwind num-blocks 100 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 91f65d2bcee7..5735ae6ef528 100644 
--- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -132,7 +132,7 @@ jobs: # https://github.com/openethereum/openethereum/blob/6c2d392d867b058ff867c4373e40850ca3f96969/.github/workflows/build.yml run: | body=$(cat <<- "ENDBODY" - ![image](https://github.com/paradigmxyz/reth/assets/17802178/d02595cf-7130-418f-81a3-ec91f614abf5) + ![image](https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-prod.png) ## Testing Checklist (DELETE ME) @@ -175,7 +175,7 @@ jobs: [See pre-built binaries documentation.](https://paradigmxyz.github.io/reth/installation/binaries.html) - The binaries are signed with the PGP key: `A3AE 097C 8909 3A12 4049 DF1F 5391 A3C4 1005 30B4` + The binaries are signed with the PGP key: `50FB 7CC5 5B2E 8AFA 59FE 03B7 AA5E D56A 7FBF 253E` | System | Architecture | Binary | PGP Signature | |:---:|:---:|:---:|:---| diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 05ff0960916c..a6663aea8843 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -39,7 +39,7 @@ jobs: run: | cargo nextest run \ --locked --features "asm-keccak ${{ matrix.network }}" \ - --workspace --exclude examples --exclude ef-tests \ + --workspace --exclude ef-tests \ --partition hash:${{ matrix.partition }}/2 \ -E "!kind(test)" diff --git a/CODEOWNERS b/CODEOWNERS index 6ee0cf25a839..225d0f08b174 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,11 +2,13 @@ bin/ @onbjerg crates/blockchain-tree/ @rakita @rkrasiuk @mattsse @Rjected crates/blockchain-tree-api/ @rakita @rkrasiuk @mattsse @Rjected +crates/chainspec/ @Rjected @joshieDo @mattsse crates/cli/ @onbjerg @mattsse crates/config/ @onbjerg crates/consensus/ @rkrasiuk @mattsse @Rjected +crates/engine @rkrasiuk @mattsse @Rjected crates/e2e-test-utils/ @mattsse @Rjected -crates/engine-primitives/ @rkrasiuk @mattsse @Rjected +crates/engine/ @rkrasiuk @mattsse @Rjected @fgimenez crates/errors/ @mattsse crates/ethereum/ @mattsse @Rjected crates/ethereum-forks/ 
@mattsse @Rjected @@ -22,9 +24,10 @@ crates/node-core/ @mattsse @Rjected @onbjerg crates/optimism/ @mattsse @Rjected @fgimenez crates/payload/ @mattsse @Rjected crates/primitives/ @DaniPopes @Rjected +crates/primitives-traits/ @DaniPopes @Rjected @joshieDo crates/prune/ @shekhirin @joshieDo crates/revm/ @mattsse @rakita -crates/rpc/ @mattsse @Rjected +crates/rpc/ @mattsse @Rjected @emhane crates/stages/ @onbjerg @rkrasiuk @shekhirin crates/static-file/ @joshieDo @shekhirin crates/storage/codecs/ @joshieDo diff --git a/Cargo.lock b/Cargo.lock index 7f0ebed43964..f38e87acff77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -109,9 +109,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.18" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03fd095a9d70f4b1c5c102c84a4c782867a5c6416dbf6dcd42a63e7c7a89d3c8" +checksum = "1752d7d62e2665da650a36d84abbf239f812534475d51f072a49a533513b7cdd" dependencies = [ "alloy-rlp", "arbitrary", @@ -123,38 +123,26 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da374e868f54c7f4ad2ad56829827badca388efd645f8cf5fccc61c2b5343504" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-serde", "arbitrary", "c-kzg", "proptest", - "proptest-derive", - "serde", -] - -[[package]] -name = "alloy-consensus" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#14ed25d8ab485fc0d313fd1e055862c9d20ef273" -dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", - "alloy-primitives", - "alloy-rlp", - 
"alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", - "c-kzg", + "proptest-derive 0.4.0", "serde", ] [[package]] name = "alloy-dyn-abi" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6e6436a9530f25010d13653e206fab4c9feddacf21a54de8d7311b275bc56b" +checksum = "413902aa18a97569e60f679c23f46a18db1656d87ab4d4e49d0e1e52042f66df" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -170,63 +158,39 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76ecab54890cdea1e4808fc0891c7e6cfcf71fe1a9fe26810c7280ef768f4ed" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-serde", "arbitrary", "c-kzg", "derive_more", "once_cell", "proptest", - "proptest-derive", - "serde", - "sha2 0.10.8", -] - -[[package]] -name = "alloy-eips" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#14ed25d8ab485fc0d313fd1e055862c9d20ef273" -dependencies = [ - "alloy-primitives", - "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", - "c-kzg", - "once_cell", + "proptest-derive 0.4.0", "serde", "sha2 0.10.8", ] [[package]] name = "alloy-genesis" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" -dependencies = [ - "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "serde", - "serde_json", -] - -[[package]] -name = "alloy-genesis" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bca15afde1b6d15e3fc1c97421262b1bbb37aee45752e3c8b6d6f13f776554ff" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-serde", "serde", - "serde_json", ] [[package]] name = "alloy-json-abi" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaeaccd50238126e3a0ff9387c7c568837726ad4f4e399b528ca88104d6c25ef" +checksum = "bc05b04ac331a9f07e3a4036ef7926e49a8bf84a99a1ccfc7e2ab55a5fcbb372" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -236,8 +200,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d6f34930b7e3e2744bcc79056c217f00cb2abb33bc5d4ff88da7623c5bb078b" dependencies = [ "alloy-primitives", "serde", @@ -248,14 +213,16 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25f6895fc31b48fa12306ef9b4f78b7764f8bd6d7d91cdb0a40e233704a0f23f" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", + "alloy-eips", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types-eth 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-rpc-types-eth", + "alloy-serde", "alloy-signer", "alloy-sol-types", "async-trait", @@ -266,10 +233,11 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "494b2fb0276a78ec13791446a417c2517eee5c8e8a8c520ae0681975b8056e5c" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-genesis", "alloy-primitives", "k256", "serde_json", @@ -281,9 +249,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f783611babedbbe90db3478c120fb5f5daacceffc210b39adc0af4fe0da70bad" +checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" dependencies = [ "alloy-rlp", "arbitrary", @@ -299,7 +267,7 @@ dependencies = [ "k256", "keccak-asm", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "rand 0.8.5", "ruint", "serde", @@ -308,19 +276,21 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c538bfa893d07e27cb4f3c1ab5f451592b7c526d511d62b576a2ce59e146e4a" dependencies = [ "alloy-chains", - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", + "alloy-eips", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-pubsub", "alloy-rpc-client", - "alloy-rpc-types-eth 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-rpc-types-trace", + "alloy-rpc-types-admin", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-transport", "alloy-transport-http", "alloy-transport-ws", @@ -342,8 +312,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a7341322d9bc0e49f6e9fd9f2eb8e30f73806f2dd12cbb3d6bab2694c921f87" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -360,9 +331,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b155716bab55763c95ba212806cf43d05bcc70e5f35b02bad20cf5ec7fe11fed" +checksum = "a43b18702501396fa9bcdeecd533bc85fac75150d308fc0f6800a01e6234a003" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -371,19 +342,20 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8037e03c7f462a063f28daec9fda285a9a89da003c552f8637a80b9c8fd96241" +checksum = "d83524c1f6162fcb5b0decf775498a125066c86dda6066ed609531b0e912f85a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] name = "alloy-rpc-client" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ba31bae67773fd5a60020bea900231f8396202b7feca4d0c70c6b59308ab4a8" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -405,39 +377,46 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "184a7a42c7ba9141cc9e76368356168c282c3bc3d9e5d78f3556bdfe39343447" dependencies = [ "alloy-rpc-types-engine", - "alloy-rpc-types-eth 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-rpc-types-eth", + "alloy-rpc-types-trace", + "alloy-serde", ] 
[[package]] -name = "alloy-rpc-types" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +name = "alloy-rpc-types-admin" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e953064025c49dc9f6a3f3ac07a713487849065692228b33948f2714f2bb60d" dependencies = [ - "alloy-rpc-types-eth 0.1.0 (git+https://github.com/alloy-rs/alloy)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-genesis", + "alloy-primitives", + "serde", + "serde_json", ] [[package]] name = "alloy-rpc-types-anvil" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c7cf4356a9d00df76d6e90d002e2a7b5edc1c8476e90e6f17ab868d99db6435" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-serde", "serde", ] [[package]] name = "alloy-rpc-types-beacon" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5f2e67d3e2478902b71bbadcd564ee5bbcc71945a0010a1f0e87a2339c6f3f9" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "serde", @@ -447,17 +426,18 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e765962e3b82fd6f276a0873b5bd897e5d75a25f78fa9a6a21bd350d8e98a4e" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - 
"alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types-eth 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-rpc-types-eth", + "alloy-serde", "jsonrpsee-types", - "jsonwebtoken 9.3.0", + "jsonwebtoken", "rand 0.8.5", "serde", "thiserror", @@ -465,80 +445,84 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab4123ee21f99ba4bd31bfa36ba89112a18a500f8b452f02b35708b1b951e2b9" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-serde", "alloy-sol-types", "arbitrary", "itertools 0.13.0", "jsonrpsee-types", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "serde", "serde_json", "thiserror", ] [[package]] -name = "alloy-rpc-types-eth" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +name = "alloy-rpc-types-mev" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd8624e01721deacad6bc9af75abdf2e99d248df0e1ad5f3f0bda0b3c1d50fd" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-eips", "alloy-primitives", - "alloy-rlp", - 
"alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", - "alloy-sol-types", - "itertools 0.13.0", + "alloy-serde", "serde", "serde_json", - "thiserror", ] [[package]] name = "alloy-rpc-types-trace" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567933b1d95fd42cb70b75126e32afec2e5e2c3c16e7100a3f83dc1c80f4dc0e" dependencies = [ "alloy-primitives", - "alloy-rpc-types-eth 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-rpc-types-eth", + "alloy-serde", "serde", "serde_json", + "thiserror", ] [[package]] -name = "alloy-serde" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +name = "alloy-rpc-types-txpool" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3115f4eb1bb9ae9aaa0b24ce875a1d86d6689b16438a12377832def2b09e373c" dependencies = [ "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", "serde", - "serde_json", ] [[package]] name = "alloy-serde" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9416c52959e66ead795a11f4a86c248410e9e368a0765710e57055b8a1774dd6" dependencies = [ "alloy-primitives", + "arbitrary", + "proptest", + "proptest-derive 0.4.0", "serde", "serde_json", ] [[package]] name = "alloy-signer" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b33753c09fa1ad85e5b092b8dc2372f1e337a42e84b9b4cff9fede75ba4adb32" dependencies = [ 
"alloy-primitives", "async-trait", @@ -549,11 +533,12 @@ dependencies = [ ] [[package]] -name = "alloy-signer-wallet" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +name = "alloy-signer-local" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dfc9c26fe6c6f1bad818c9a976de9044dd12e1f75f1f156a801ee3e8148c1b6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", "alloy-network", "alloy-primitives", "alloy-signer", @@ -567,23 +552,23 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bad41a7c19498e3f6079f7744656328699f8ea3e783bdd10d85788cd439f572" +checksum = "2b40397ddcdcc266f59f959770f601ce1280e699a91fc1862f29cef91707cd09" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd9899da7d011b4fe4c406a524ed3e3f963797dbc93b45479d60341d3a27b252" +checksum = "867a5469d61480fea08c7333ffeca52d5b621f5ca2e44f271b117ec1fc9a0525" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -593,16 +578,16 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32d595768fdc61331a132b6f65db41afae41b9b97d36c21eb1b955c422a7e60" +checksum = "2e482dc33a32b6fadbc0f599adea520bd3aaa585c141a80b404d0a3e3fa72528" dependencies = [ "alloy-json-abi", "const-hex", @@ -611,24 +596,25 @@ dependencies = [ 
"proc-macro2", "quote", "serde_json", - "syn 2.0.66", + "syn 2.0.69", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baa2fbd22d353d8685bd9fee11ba2d8b5c3b1d11e56adb3265fcf1f32bfdf404" +checksum = "cbcba3ca07cf7975f15d871b721fb18031eec8bce51103907f6dcce00b255d98" dependencies = [ + "serde", "winnow 0.6.13", ] [[package]] name = "alloy-sol-types" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a49042c6d3b66a9fe6b2b5a8bf0d39fc2ae1ee0310a2a26ffedd79fb097878dd" +checksum = "a91ca40fa20793ae9c3841b83e74569d1cc9af29a2f5237314fd3452d51e38c7" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -639,8 +625,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01b51a291f949f755e6165c3ed562883175c97423703703355f4faa4b7d0a57c" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -651,13 +638,15 @@ dependencies = [ "thiserror", "tokio", "tower", + "tracing", "url", ] [[package]] name = "alloy-transport-http" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d65871f9f1cafe1ed25cde2f1303be83e6473e995a2d56c275ae4fcce6119c" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -668,15 +657,36 @@ dependencies = [ "url", ] +[[package]] +name = "alloy-transport-ipc" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd7fbc8b6282ce41b01cbddef7bffb133fe6e1bf65dcd39770d45a905c051179" +dependencies = [ + "alloy-json-rpc", + "alloy-pubsub", + 
"alloy-transport", + "bytes", + "futures", + "interprocess", + "pin-project", + "serde_json", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "alloy-transport-ws" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=14ed25d#14ed25d8ab485fc0d313fd1e055862c9d20ef273" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aec83fd052684556c78c54df111433493267234d82321c2236560c752f595f20" dependencies = [ "alloy-pubsub", "alloy-transport", "futures", "http 1.1.0", + "rustls 0.23.10", "serde_json", "tokio", "tokio-tungstenite", @@ -698,7 +708,7 @@ dependencies = [ "hashbrown 0.14.5", "nybbles", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "serde", "smallvec", "tracing", @@ -791,7 +801,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -1017,20 +1027,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-channel" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" -dependencies = [ - "concurrent-queue", - "event-listener-strategy", + "event-listener", "futures-core", - "pin-project-lite", ] [[package]] @@ -1045,8 +1043,8 @@ dependencies = [ "memchr", "pin-project-lite", "tokio", - "zstd 0.13.1", - "zstd-safe 7.1.0", + "zstd 0.13.2", + "zstd-safe 7.2.0", ] [[package]] @@ -1055,8 +1053,8 @@ version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6fa871e4334a622afd6bb2f611635e8083a6f5e2936c0f90f37c7ef9856298" dependencies = [ - "async-channel 1.9.0", - "futures-lite 1.13.0", + "async-channel", + "futures-lite", "http-types", "log", "memchr", @@ -1082,24 +1080,18 @@ 
checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] -[[package]] -name = "async-task" -version = "4.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" - [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -1146,7 +1138,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -1169,9 +1161,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.72" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", @@ -1218,36 +1210,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "beacon-api-sidecar-fetcher" -version = "0.1.0" -dependencies = [ - "alloy-rpc-types-beacon", - "clap", - "eyre", - "futures-util", - "reqwest 0.12.5", - "reth", - "reth-node-ethereum", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "beacon-api-sse" -version = "0.0.0" -dependencies = [ - "alloy-rpc-types-beacon", - "clap", - "futures-util", - "mev-share-sse", - "reth", - "reth-node-ethereum", - "tokio", - "tracing", -] - [[package]] name = "bech32" version = "0.9.1" @@ -1265,7 
+1227,7 @@ dependencies = [ [[package]] name = "bigquery" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "async-trait", "chrono", @@ -1302,21 +1264,18 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools 0.12.1", "lazy_static", "lazycell", - "log", - "prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", - "syn 2.0.66", - "which", + "syn 2.0.69", ] [[package]] @@ -1348,9 +1307,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" dependencies = [ "arbitrary", "serde", @@ -1358,9 +1317,9 @@ dependencies = [ [[package]] name = "bitm" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31b9ea263f0faf826a1c9de0e8bf8f32f5986c05f5e3abcf6bcde74616009586" +checksum = "b06e8e5bec3490b9f6f3adbb78aa4f53e8396fd9994e8a62a346b44ea7c15f35" dependencies = [ "dyn_size_of", ] @@ -1405,19 +1364,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "blocking" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" -dependencies = [ - "async-channel 2.3.1", - "async-task", - "futures-io", - "futures-lite 2.3.0", - "piper", -] - [[package]] name = "blst" version = "0.3.12" @@ -1436,12 +1382,12 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5b6fb81ca0f301f33aff7401e2ffab37dc9e0e4a1cf0ccf6b34f4d9e60aa0682" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "boa_interner", "boa_macros", "indexmap 2.2.6", "num-bigint", - "rustc-hash", + "rustc-hash 1.1.0", ] [[package]] @@ -1451,7 +1397,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "600e4e4a65b26efcef08a7b1cf2899d3845a32e82e067ee3b75eaf7e413ff31c" dependencies = [ "arrayvec", - "bitflags 2.5.0", + "bitflags 2.6.0", "boa_ast", "boa_gc", "boa_interner", @@ -1477,7 +1423,7 @@ dependencies = [ "portable-atomic", "rand 0.8.5", "regress", - "rustc-hash", + "rustc-hash 1.1.0", "ryu-js", "serde", "serde_json", @@ -1513,7 +1459,7 @@ dependencies = [ "indexmap 2.2.6", "once_cell", "phf", - "rustc-hash", + "rustc-hash 1.1.0", "static_assertions", ] @@ -1525,7 +1471,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", "synstructure", ] @@ -1535,7 +1481,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8592556849f0619ed142ce2b3a19086769314a8d657f93a5765d06dbce4818" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "boa_ast", "boa_interner", "boa_macros", @@ -1545,7 +1491,7 @@ dependencies = [ "num-bigint", "num-traits", "regress", - "rustc-hash", + "rustc-hash 1.1.0", ] [[package]] @@ -1615,21 +1561,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "bsc-p2p" -version = "0.0.0" -dependencies = [ - "reth-discv4", - "reth-network", - "reth-network-api", - "reth-primitives", - "reth-tracing", - "secp256k1 0.28.2", - "serde_json", - "tokio", - "tokio-stream", -] - [[package]] name = "bstr" version = "0.2.17" @@ -1655,9 +1586,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.16.0" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" +checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" dependencies = [ "bytemuck_derive", ] @@ -1670,7 +1601,7 @@ checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -1748,18 +1679,18 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "castaway" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" dependencies = [ "rustversion", ] [[package]] name = "cc" -version = "1.0.99" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" +checksum = "066fce287b1d4eafef758e89e09d724a24808a9196fe9756b8ca90e86d0719a2" dependencies = [ "jobserver", "libc", @@ -1799,7 +1730,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -1861,9 +1792,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.7" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f" +checksum = "84b3edb18336f4df585bc9aa31dd99c036dfa5dc5e9a2939a722a188f3a8970d" dependencies = [ "clap_builder", "clap_derive", @@ -1871,9 +1802,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.7" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f" +checksum = "c1c09dd5ada6c6c78075d6fd0da3f90d8080651e2d6cc8eb2f1aaa4034ced708" dependencies = [ 
"anstream", "anstyle", @@ -1883,14 +1814,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.5" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" +checksum = "2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -2053,10 +1984,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] -name = "const-str" -version = "0.5.7" +name = "const_format" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3618cccc083bb987a415d85c02ca6c9994ea5b44731ec28b9ecf09658655fba9" +checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +dependencies = [ + "const_format_proc_macros", + "konst", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] [[package]] name = "convert_case" @@ -2227,7 +2173,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "crossterm_winapi", "libc", "mio", @@ -2284,6 +2230,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + [[package]] name = "ctr" version = "0.7.0" @@ -2327,16 +2294,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms", "rustc_version 0.4.0", "subtle", "zeroize", @@ -2350,99 +2316,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] -name = "custom-dev-node" -version = "0.0.0" -dependencies = [ - "eyre", - "futures-util", - "reth", - "reth-node-core", - "reth-node-ethereum", - "reth-primitives", - "serde_json", - "tokio", -] - -[[package]] -name = "custom-engine-types" -version = "0.0.0" -dependencies = [ - "eyre", - "reth", - "reth-basic-payload-builder", - "reth-ethereum-payload-builder", - "reth-node-api", - "reth-node-core", - "reth-node-ethereum", - "reth-payload-builder", - "reth-primitives", - "reth-rpc-types", - "reth-tracing", - "serde", - "thiserror", - "tokio", -] - -[[package]] -name = "custom-evm" -version = "0.0.0" -dependencies = [ - "eyre", - "reth", - "reth-node-api", - "reth-node-core", - "reth-node-ethereum", - "reth-primitives", - "reth-tracing", - "tokio", -] - -[[package]] -name = "custom-inspector" -version = "0.0.0" -dependencies = [ - "clap", - "futures-util", - "reth", - "reth-node-ethereum", -] - -[[package]] -name = "custom-node-components" -version = "0.0.0" -dependencies = [ - "eyre", - "reth", - "reth-node-ethereum", - "reth-tracing", - "reth-transaction-pool", -] - -[[package]] 
-name = "custom-payload-builder" -version = "0.0.0" -dependencies = [ - "eyre", - "futures-util", - "reth", - "reth-basic-payload-builder", - "reth-ethereum-payload-builder", - "reth-node-api", - "reth-node-ethereum", - "reth-payload-builder", - "reth-primitives", - "tracing", -] - -[[package]] -name = "darling" -version = "0.20.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +name = "darling" +version = "0.20.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ "darling_core", "darling_macro", @@ -2459,7 +2340,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -2470,7 +2351,7 @@ checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -2512,17 +2393,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "db-access" -version = "0.0.0" -dependencies = [ - "eyre", - "reth-db", - "reth-primitives", - "reth-provider", - "reth-rpc-types", -] - [[package]] name = "debug-helper" version = "0.3.13" @@ -2587,20 +2457,20 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] name = "derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 1.0.109", + "syn 2.0.69", ] [[package]] @@ -2708,15 +2578,21 @@ dependencies = [ [[package]] name = "displaydoc" -version = "0.2.4" +version = 
"0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] +[[package]] +name = "doctest-file" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" + [[package]] name = "downcast" version = "0.11.0" @@ -2780,24 +2656,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "educe" -version = "0.4.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" -dependencies = [ - "enum-ordinalize", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "ef-tests" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "rayon", + "reth-chainspec", "reth-db", "reth-db-api", "reth-evm-ethereum", @@ -2812,9 +2677,9 @@ dependencies = [ [[package]] name = "either" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "elliptic-curve" @@ -2858,9 +2723,9 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" [[package]] name = "enr" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab656b89cdd15051d92d0931888103508de14ef9e51177c86d478dfa551ce0f" +checksum = "972070166c68827e64bd1ebc8159dd8e32d9bc2da7ebe8f20b61308f7974ad30" dependencies = [ "alloy-rlp", "base64 0.21.7", @@ -2870,7 +2735,7 @@ dependencies = [ "k256", "log", "rand 0.8.5", - "secp256k1 0.28.2", + "secp256k1", "serde", "sha3", 
"zeroize", @@ -2885,20 +2750,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.66", -] - -[[package]] -name = "enum-ordinalize" -version = "3.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" -dependencies = [ - "num-bigint", - "num-traits", - "proc-macro2", - "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -2910,7 +2762,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -2921,7 +2773,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -2991,150 +2843,452 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] -name = "event-listener" -version = "5.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +name = "example-beacon-api-sidecar-fetcher" +version = "0.1.0" dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", + "alloy-rpc-types-beacon", + "clap", + "eyre", + "futures-util", + "reqwest 0.12.5", + "reth", + "reth-node-ethereum", + "serde", + "serde_json", + "thiserror", ] [[package]] -name = "event-listener-strategy" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +name = "example-beacon-api-sse" +version = "0.0.0" dependencies = [ - "event-listener 5.3.1", - "pin-project-lite", + "alloy-rpc-types-beacon", + "clap", + "futures-util", + "mev-share-sse", + "reth", + "reth-node-ethereum", + "tokio", + "tracing", ] [[package]] -name = "exex-etl" -version = "0.2.0-beta.9" - -[[package]] -name = "exex-in-memory-state" +name = 
"example-bsc-p2p" version = "0.0.0" dependencies = [ - "eyre", - "reth", - "reth-exex", - "reth-exex-test-utils", - "reth-node-api", - "reth-node-ethereum", - "reth-testing-utils", + "reth-chainspec", + "reth-discv4", + "reth-network", + "reth-network-api", + "reth-network-peers", + "reth-primitives", "reth-tracing", + "secp256k1", + "serde_json", "tokio", + "tokio-stream", ] [[package]] -name = "exex-minimal" +name = "example-custom-dev-node" version = "0.0.0" dependencies = [ "eyre", - "futures", + "futures-util", "reth", - "reth-exex", - "reth-exex-test-utils", - "reth-node-api", + "reth-chainspec", + "reth-node-core", "reth-node-ethereum", - "reth-tracing", + "reth-primitives", + "serde_json", "tokio", ] [[package]] -name = "exex-op-bridge" +name = "example-custom-engine-types" version = "0.0.0" dependencies = [ - "alloy-sol-types", + "alloy-genesis", "eyre", - "futures", - "rand 0.8.5", "reth", - "reth-exex", - "reth-exex-test-utils", + "reth-basic-payload-builder", + "reth-chainspec", + "reth-ethereum-payload-builder", "reth-node-api", + "reth-node-core", "reth-node-ethereum", + "reth-payload-builder", "reth-primitives", - "reth-provider", - "reth-testing-utils", + "reth-rpc-types", "reth-tracing", - "rusqlite", - "tempfile", + "serde", + "thiserror", "tokio", ] [[package]] -name = "exex-rollup" +name = "example-custom-evm" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-rlp", - "alloy-sol-types", + "alloy-genesis", "eyre", - "foundry-blob-explorers", - "once_cell", "reth", - "reth-execution-errors", - "reth-exex", + "reth-chainspec", + "reth-evm-ethereum", "reth-node-api", + "reth-node-core", "reth-node-ethereum", "reth-primitives", - "reth-provider", - "reth-revm", - "reth-testing-utils", "reth-tracing", - "rusqlite", - "secp256k1 0.28.2", - "serde_json", "tokio", ] [[package]] -name = "eyre" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +name = "example-custom-inspector" +version = "0.0.0" dependencies = [ - "indenter", - "once_cell", + "clap", + "futures-util", + "reth", + "reth-node-ethereum", + "reth-rpc-types", ] [[package]] -name = "fallible-iterator" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" - -[[package]] -name = "fallible-streaming-iterator" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" - -[[package]] -name = "fast-float" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95765f67b4b18863968b4a1bd5bb576f732b29a4a28c7cd84c09fa3e2875f33c" +name = "example-custom-node-components" +version = "0.0.0" +dependencies = [ + "eyre", + "reth", + "reth-node-ethereum", + "reth-tracing", + "reth-transaction-pool", +] [[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +name = "example-custom-payload-builder" +version = "0.0.0" dependencies = [ - "instant", + "eyre", + "futures-util", + "reth", + "reth-basic-payload-builder", + "reth-chainspec", + "reth-ethereum-payload-builder", + "reth-node-api", + "reth-node-ethereum", + "reth-payload-builder", + "reth-primitives", + "tracing", ] [[package]] -name = "fastrand" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +name = "example-custom-rlpx-subprotocol" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "rand 0.8.5", + "reth", + "reth-eth-wire", + "reth-network", + "reth-network-api", + "reth-node-ethereum", + "reth-primitives", + 
"reth-provider", + "reth-rpc-types", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "example-db-access" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-chainspec", + "reth-db", + "reth-primitives", + "reth-provider", + "reth-rpc-types", +] + +[[package]] +name = "example-exex-discv5" +version = "0.0.0" +dependencies = [ + "clap", + "discv5", + "enr", + "eyre", + "futures", + "futures-util", + "reth", + "reth-chainspec", + "reth-discv5", + "reth-exex", + "reth-exex-test-utils", + "reth-network-peers", + "reth-node-api", + "reth-node-ethereum", + "reth-testing-utils", + "reth-tracing", + "serde_json", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "example-exex-in-memory-state" +version = "0.0.0" +dependencies = [ + "eyre", + "reth", + "reth-execution-types", + "reth-exex", + "reth-exex-test-utils", + "reth-node-api", + "reth-node-ethereum", + "reth-testing-utils", + "reth-tracing", + "tokio", +] + +[[package]] +name = "example-exex-minimal" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth", + "reth-execution-types", + "reth-exex", + "reth-exex-test-utils", + "reth-node-api", + "reth-node-ethereum", + "reth-tracing", + "tokio", +] + +[[package]] +name = "example-exex-op-bridge" +version = "0.0.0" +dependencies = [ + "alloy-sol-types", + "eyre", + "futures", + "rand 0.8.5", + "reth", + "reth-execution-types", + "reth-exex", + "reth-exex-test-utils", + "reth-node-api", + "reth-node-ethereum", + "reth-primitives", + "reth-testing-utils", + "reth-tracing", + "rusqlite", + "tempfile", + "tokio", +] + +[[package]] +name = "example-exex-rollup" +version = "0.0.0" +dependencies = [ + "alloy-consensus", + "alloy-genesis", + "alloy-rlp", + "alloy-sol-types", + "eyre", + "foundry-blob-explorers", + "once_cell", + "reth", + "reth-chainspec", + "reth-execution-errors", + "reth-execution-types", + "reth-exex", + "reth-node-api", + "reth-node-ethereum", + "reth-primitives", + "reth-provider", + "reth-revm", + 
"reth-testing-utils", + "reth-tracing", + "rusqlite", + "secp256k1", + "serde_json", + "tokio", +] + +[[package]] +name = "example-manual-p2p" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "once_cell", + "reth-chainspec", + "reth-discv4", + "reth-ecies", + "reth-eth-wire", + "reth-network", + "reth-network-peers", + "reth-primitives", + "secp256k1", + "tokio", +] + +[[package]] +name = "example-network" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth-network", + "reth-provider", + "tokio", +] + +[[package]] +name = "example-network-txpool" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-network", + "reth-provider", + "reth-transaction-pool", + "tokio", +] + +[[package]] +name = "example-node-custom-rpc" +version = "0.0.0" +dependencies = [ + "clap", + "jsonrpsee", + "reth", + "reth-node-ethereum", + "reth-transaction-pool", + "tokio", +] + +[[package]] +name = "example-node-event-hooks" +version = "0.0.0" +dependencies = [ + "reth", + "reth-node-ethereum", +] + +[[package]] +name = "example-polygon-p2p" +version = "0.0.0" +dependencies = [ + "reth-chainspec", + "reth-discv4", + "reth-network", + "reth-primitives", + "reth-provider", + "reth-tracing", + "secp256k1", + "serde_json", + "tokio", + "tokio-stream", +] + +[[package]] +name = "example-rpc-db" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "jsonrpsee", + "reth", + "reth-chainspec", + "reth-db", + "reth-db-api", + "reth-node-ethereum", + "reth-provider", + "tokio", +] + +[[package]] +name = "example-stateful-precompile" +version = "0.0.0" +dependencies = [ + "alloy-genesis", + "eyre", + "parking_lot 0.12.3", + "reth", + "reth-chainspec", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-tracing", + "schnellru", + "tokio", +] + +[[package]] +name = "example-txpool-tracing" +version = "0.0.0" +dependencies = [ + "clap", + "futures-util", + "reth", + "reth-node-ethereum", +] + +[[package]] +name = "exex-etl" 
+version = "1.0.1" + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] +name = "fast-float" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95765f67b4b18863968b4a1bd5bb576f732b29a4a28c7cd84c09fa3e2875f33c" + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -3241,13 +3395,14 @@ dependencies = [ [[package]] name = "foundry-blob-explorers" version = "0.1.0" -source = "git+https://github.com/foundry-rs/block-explorers#d5fdf79cd62f378448907663fc4ba9d085393b35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "195bb5b228e1215c50d828f3e7d48a809a0af2bc0120462710ea5e7fcba3cbe2" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-eips", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-rpc-types-eth", + 
"alloy-serde", "chrono", "reqwest 0.12.5", "serde", @@ -3329,16 +3484,6 @@ dependencies = [ "waker-fn", ] -[[package]] -name = "futures-lite" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" -dependencies = [ - "futures-core", - "pin-project-lite", -] - [[package]] name = "futures-macro" version = "0.3.30" @@ -3347,7 +3492,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -3405,7 +3550,7 @@ dependencies = [ "async-stream", "async-trait", "dyn-clone", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.24.2", "log", "reqwest 0.11.27", @@ -3428,7 +3573,7 @@ dependencies = [ "async-stream", "async-trait", "dyn-clone", - "hyper 1.3.1", + "hyper 1.4.0", "log", "reqwest 0.12.5", "serde", @@ -3838,9 +3983,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", - "async-channel 1.9.0", + "async-channel", "base64 0.13.1", - "futures-lite 1.13.0", + "futures-lite", "infer", "pin-project-lite", "rand 0.7.3", @@ -3853,9 +3998,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.2" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3935c160d00ac752e09787e6e6bfc26494c2183cc922f1bc678a60d4733bc2" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -3887,9 +4032,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", 
"futures-channel", @@ -3902,7 +4047,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -3911,9 +4056,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" dependencies = [ "bytes", "futures-channel", @@ -3938,7 +4083,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.30", "log", "rustls 0.21.12", "rustls-native-certs 0.6.3", @@ -3954,10 +4099,10 @@ checksum = "399c78f9338483cb7e630c8474b07268983c6bd5acee012e4211f9f7bb21b070" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.30", "log", "rustls 0.22.4", - "rustls-native-certs 0.7.0", + "rustls-native-certs 0.7.1", "rustls-pki-types", "tokio", "tokio-rustls 0.25.0", @@ -3971,30 +4116,30 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.4.0", "hyper-util", "log", - "rustls 0.23.9", - "rustls-native-certs 0.7.0", + "rustls 0.23.10", + "rustls-native-certs 0.7.1", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", - "webpki-roots 0.26.2", + "webpki-roots 0.26.3", ] [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.4.0", "pin-project-lite", "socket2 0.5.7", 
"tokio", @@ -4005,16 +4150,13 @@ dependencies = [ [[package]] name = "iai-callgrind" -version = "0.10.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99bf26f496b13ac6273014f40afda46a233fbfb0289ce50fb4daaad2f2ffc80" +checksum = "146bf76de95f03c5f4b118f0f2f350ef18df47cc0595755bd29d8f668209466c" dependencies = [ "bincode", - "bindgen", - "cc", "iai-callgrind-macros", "iai-callgrind-runner", - "regex", ] [[package]] @@ -4026,14 +4168,14 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] name = "iai-callgrind-runner" -version = "0.10.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c23a951b9eccaa1e38556d27473d1462a9c247a27961812edcaac156af861282" +checksum = "60484b2e469ef4f1af6f196af738889ff375151dd11ac223647ed8a97529107d" dependencies = [ "serde", ] @@ -4132,9 +4274,9 @@ checksum = "e3744fecc0df9ce19999cdaf1f9f3a48c253431ce1d67ef499128fe9d0b607ab" [[package]] name = "icu_properties" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8173ba888885d250016e957b8ebfd5a65cdb690123d8833a19f6833f9c2b579" +checksum = "db9e559598096627aeca8cdfb98138a70eb4078025f8d1d5f2416a361241f756" dependencies = [ "displaydoc", "icu_collections", @@ -4176,7 +4318,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -4197,14 +4339,12 @@ dependencies = [ [[package]] name = "idna" -version = "1.0.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4716a3a0933a1d01c2f72450e89596eb51dd34ef3c211ccd875acdf1f8fe47ed" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "icu_normalizer", - "icu_properties", - "smallvec", - "utf8_iter", + "unicode-bidi", + 
"unicode-normalization", ] [[package]] @@ -4247,18 +4387,18 @@ dependencies = [ [[package]] name = "include_dir" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +checksum = "923d117408f1e49d914f1a379a309cffe4f18c05cf4e3d12e613a15fc81bd0dd" dependencies = [ "include_dir_macros", ] [[package]] name = "include_dir_macros" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75" dependencies = [ "proc-macro2", "quote", @@ -4337,31 +4477,19 @@ dependencies = [ [[package]] name = "interprocess" -version = "1.2.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81f2533f3be42fffe3b5e63b71aeca416c1c3bc33e4e27be018521e76b1f38fb" +checksum = "67bafc2f5dbdad79a6d925649758d5472647b416028099f0b829d1b67fdd47d3" dependencies = [ - "blocking", - "cfg-if", + "doctest-file", "futures-core", - "futures-io", - "intmap", "libc", - "once_cell", - "rustc_version 0.4.0", - "spinning", - "thiserror", - "to_method", + "recvmsg", "tokio", - "winapi", + "widestring", + "windows-sys 0.52.0", ] -[[package]] -name = "intmap" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae52f28f45ac2bc96edb7714de995cffc174a395fb0abf5bff453587c980d7b9" - [[package]] name = "intrusive-collections" version = "0.9.6" @@ -4489,9 +4617,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a130d27083a4001b7b2d72a19f08786299550f76c9bd5307498dce2c2b20fa" +checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" dependencies = [ 
"jsonrpsee-client-transport", "jsonrpsee-core", @@ -4507,9 +4635,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039db9fe25cd63b7221c3f8788c1ef4ea07987d40ec25a1e7d7a3c3e3e3fd130" +checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" dependencies = [ "base64 0.22.1", "futures-channel", @@ -4518,7 +4646,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-pki-types", "rustls-platform-verifier", "soketto", @@ -4532,9 +4660,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21545a9445fbd582840ff5160a9a3e12b8e6da582151cdb07bde9a1970ba3a24" +checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" dependencies = [ "anyhow", "async-trait", @@ -4549,7 +4677,7 @@ dependencies = [ "parking_lot 0.12.3", "pin-project", "rand 0.8.5", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "serde_json", "thiserror", @@ -4561,19 +4689,19 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb25cab482c8512c4f3323a5c90b95a3b8f7c90681a87bf7a68b942d52f08933" +checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" dependencies = [ "async-trait", "base64 0.22.1", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.4.0", "hyper-rustls 0.27.2", "hyper-util", "jsonrpsee-core", "jsonrpsee-types", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-platform-verifier", "serde", "serde_json", @@ -4586,29 +4714,29 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c18184cd09b386feb18085609e8bf77bdc942482bdd82777b433b8d015edf561" +checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ "heck 0.5.0", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] name = "jsonrpsee-server" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "810f63eff0f78fa8d413d678c0e55b702e2ea61d4587774c0db4ea2fc554ef92" +checksum = "654afab2e92e5d88ebd8a39d6074483f3f2bfdf91c5ac57fe285e7127cdd4f51" dependencies = [ "anyhow", "futures-util", "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.4.0", "hyper-util", "jsonrpsee-core", "jsonrpsee-types", @@ -4627,9 +4755,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f511b714bca46f9a3e97c0e0eb21d2c112e83e444d2db535b5ec7093f5836d73" +checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" dependencies = [ "beef", "http 1.1.0", @@ -4640,9 +4768,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c8a6dfa0c35c8549fa8e003ce0bbcf37b051ab7ef85fce587e8f0ed7881c84d" +checksum = "4727ac037f834c6f04c0912cada7532dbddb54e92fbc64e33d6cb8c24af313c9" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4651,29 +4779,15 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786c100eb67df2f2d863d231c2c6978bcf80ff4bf606ffc40e7e68ef562da7bf" -dependencies = [ - "http 1.1.0", - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", - "url", -] - -[[package]] -name = "jsonwebtoken" -version = "8.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" -dependencies = [ - "base64 0.21.7", - "pem 1.1.1", - "ring 0.16.20", - "serde", - "serde_json", - "simple_asn1", +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" +dependencies = [ + "http 1.1.0", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", + "url", ] [[package]] @@ -4684,8 +4798,8 @@ checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ "base64 0.21.7", "js-sys", - "pem 3.0.4", - "ring 0.17.8", + "pem", + "ring", "serde", "serde_json", "simple_asn1", @@ -4724,9 +4838,24 @@ dependencies = [ "sha3-asm", ] +[[package]] +name = "konst" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "330f0e13e6483b8c34885f7e6c9f19b1a7bd449c673fbb948a51c99d66ef74f4" +dependencies = [ + "konst_macro_rules", +] + +[[package]] +name = "konst_macro_rules" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4933f3f57a8e9d9da04db23fb153356ecaf00cbd14aee46279c33dc80925c37" + [[package]] name = "lambda" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "eyre", "reqwest 0.12.5", @@ -4740,11 +4869,11 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] @@ -4834,12 +4963,12 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -4897,15 +5026,14 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.41.2" +version = "0.41.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8130a8269e65a2554d55131c770bdf4bcd94d2b8d4efb24ca23699be65066c05" +checksum = "a5a8920cbd8540059a01950c1e5c96ea8d89eb50c51cd366fc18bdf540a6e48f" dependencies = [ "either", "fnv", "futures", "futures-timer", - "instant", "libp2p-identity", "multiaddr", "multihash", @@ -4921,13 +5049,14 @@ dependencies = [ "tracing", "unsigned-varint 0.8.0", "void", + "web-time", ] [[package]] name = "libp2p-identity" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" +checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" dependencies = [ "asn1_der", "bs58", @@ -4982,7 +5111,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "libc", ] @@ -5084,9 +5213,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "lru" @@ -5141,23 +5270,6 @@ dependencies = [ "libc", ] -[[package]] -name = "manual-p2p" -version = "0.0.0" -dependencies = [ - "eyre", - "futures", - "once_cell", - "reth-discv4", - "reth-ecies", - 
"reth-eth-wire", - "reth-network", - "reth-network-peers", - "reth-primitives", - "secp256k1 0.28.2", - "tokio", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -5175,9 +5287,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap2" @@ -5208,9 +5320,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.22.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2be3cbd384d4e955b231c895ce10685e3d8260c5ccffae898c96c723b0772835" +checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" dependencies = [ "ahash", "portable-atomic", @@ -5218,9 +5330,9 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d58e362dc7206e9456ddbcdbd53c71ba441020e62104703075a69151e38d85f" +checksum = "bf0af7a0d7ced10c0151f870e5e3f3f8bc9ffc5992d32873566ca1f9169ae776" dependencies = [ "base64 0.22.1", "indexmap 2.2.6", @@ -5232,9 +5344,9 @@ dependencies = [ [[package]] name = "metrics-process" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7d8f5027620bf43b86e2c8144beea1e4323aec39241f5eae59dee54f79c6a29" +checksum = "cb524e5438255eaa8aa74214d5a62713b77b2c3c6e3c0bbeee65cfd9a58948ba" dependencies = [ "libproc", "mach2", @@ -5242,14 +5354,14 @@ dependencies = [ "once_cell", "procfs", "rlimit", - "windows 0.56.0", + "windows 0.57.0", ] [[package]] name = "metrics-util" -version = "0.16.3" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8b07a5eb561b8cbc16be2d216faf7757f9baf3bfb94dbb0fae3df8387a5bb47f" +checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" dependencies = [ "aho-corasick", "crossbeam-epoch", @@ -5266,9 +5378,9 @@ dependencies = [ [[package]] name = "mev-share-sse" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20cbf228751922258c86a8492b39f987bb22338bef0b09426c106853be0c9fc7" +checksum = "e00cdd87dab765e7dac55c21eb680bfd10655b6c2530f6fe578acdfbb66c757c" dependencies = [ "alloy-primitives", "async-sse", @@ -5292,9 +5404,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -5308,9 +5420,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] @@ -5351,7 +5463,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -5457,28 +5569,6 @@ dependencies = [ "target-features", ] -[[package]] -name = "network" -version = "0.0.0" -dependencies = [ - "eyre", - "futures", - "reth-network", - "reth-provider", - "tokio", -] - -[[package]] -name = "network-txpool" -version = "0.0.0" -dependencies = [ - "eyre", - "reth-network", - "reth-provider", - "reth-transaction-pool", - "tokio", -] - [[package]] name = "nibble_vec" version = "0.1.0" @@ -5499,26 +5589,6 @@ 
dependencies = [ "libc", ] -[[package]] -name = "node-custom-rpc" -version = "0.0.0" -dependencies = [ - "clap", - "jsonrpsee", - "reth", - "reth-node-ethereum", - "reth-transaction-pool", - "tokio", -] - -[[package]] -name = "node-event-hooks" -version = "0.0.0" -dependencies = [ - "reth", - "reth-node-ethereum", -] - [[package]] name = "nom" version = "7.1.3" @@ -5573,9 +5643,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", @@ -5676,7 +5746,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -5704,9 +5774,9 @@ dependencies = [ [[package]] name = "object" -version = "0.35.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" +checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" dependencies = [ "memchr", ] @@ -5719,9 +5789,38 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" + +[[package]] +name = "op-alloy-consensus" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f491085509d77ebd05dbf75592093a9bebc8e7fc642b90fb4ac13b747d48b2fc" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "op-alloy-rpc-types" +version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "3f26a0cb2f7183c5e51d2806bf4ab9ec050e47c4595deff9bec7f2ba218db9d7" +dependencies = [ + "alloy-network", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "op-alloy-consensus", + "serde", + "serde_json", +] [[package]] name = "opaque-debug" @@ -5743,9 +5842,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "4.2.0" +version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" +checksum = "19ff2cf528c6c03d9ed653d6c4ce1dc0582dc4af309790ad92f07c1cd551b0be" dependencies = [ "num-traits", ] @@ -5854,9 +5953,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.1", + "redox_syscall 0.5.2", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -5903,15 +6002,6 @@ dependencies = [ "hmac 0.12.1", ] -[[package]] -name = "pem" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "pem" version = "3.0.4" @@ -5930,9 +6020,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.10" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" dependencies = [ "memchr", "thiserror", @@ -5992,7 +6082,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -6021,7 
+6111,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -6036,17 +6126,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "piper" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391" -dependencies = [ - "atomic-waker", - "fastrand 2.1.0", - "futures-io", -] - [[package]] name = "pkcs8" version = "0.10.2" @@ -6081,12 +6160,6 @@ dependencies = [ "array-init-cursor", ] -[[package]] -name = "platforms" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" - [[package]] name = "plotters" version = "0.3.6" @@ -6154,7 +6227,7 @@ checksum = "08367c014c07fa8f141680e024f926cab3a1fe839605a8fcf2223647eb45ca71" dependencies = [ "ahash", "arrow2", - "bitflags 2.5.0", + "bitflags 2.6.0", "chrono", "comfy-table", "either", @@ -6244,7 +6317,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5110eab438848c981cc5f541fbc5b21bb263fd707000b4715233074fb2630fcf" dependencies = [ "ahash", - "bitflags 2.5.0", + "bitflags 2.6.0", "glob", "once_cell", "polars-arrow", @@ -6394,21 +6467,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22686f4785f02a4fcc856d3b3bb19bf6c8160d103f7a99cc258bddd0251dc7f2" -[[package]] -name = "polygon-p2p" -version = "0.0.0" -dependencies = [ - "reth-discv4", - "reth-network", - "reth-primitives", - "reth-provider", - "reth-tracing", - "secp256k1 0.28.2", - "serde_json", - "tokio", - "tokio-stream", -] - [[package]] name = "polyval" version = "0.5.3" @@ -6494,7 +6552,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -6554,9 +6612,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.85" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -6567,7 +6625,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "chrono", "flate2", "hex", @@ -6582,20 +6640,20 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "chrono", "hex", ] [[package]] name = "proptest" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.5.0", + "bitflags 2.6.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -6607,6 +6665,16 @@ dependencies = [ "unarray", ] +[[package]] +name = "proptest-arbitrary-interop" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1981e49bd2432249da8b0e11e5557099a8e74690d6b94e721f7dc0bb7f3555f" +dependencies = [ + "arbitrary", + "proptest", +] + [[package]] name = "proptest-derive" version = "0.4.0" @@ -6618,6 +6686,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "proptest-derive" 
+version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.69", +] + [[package]] name = "quanta" version = "0.12.3" @@ -6667,8 +6746,8 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash", - "rustls 0.23.9", + "rustc-hash 1.1.0", + "rustls 0.23.10", "thiserror", "tokio", "tracing", @@ -6682,9 +6761,9 @@ checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" dependencies = [ "bytes", "rand 0.8.5", - "ring 0.17.8", - "rustc-hash", - "rustls 0.23.9", + "ring", + "rustc-hash 1.1.0", + "rustls 0.23.10", "slab", "thiserror", "tinyvec", @@ -6821,19 +6900,20 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.26.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f44c9e68fd46eda15c646fbb85e1040b657a58cdc8c98db1d97a55930d991eef" +checksum = "d16546c5b5962abf8ce6e2881e722b4e0ae3b6f1a08a26ae3573c55853ca68d3" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cassowary", "compact_str", "crossterm", - "itertools 0.12.1", + "itertools 0.13.0", "lru", "paste", "stability", "strum", + "strum_macros 0.26.4", "unicode-segmentation", "unicode-truncate", "unicode-width", @@ -6845,7 +6925,7 @@ version = "11.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e29830cbb1290e404f24c73af91c5d8d631ce7e128691e9477556b540cd01ecd" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] @@ -6868,6 +6948,12 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "recvmsg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" + [[package]] name = "redox_syscall" version = "0.2.16" @@ -6879,11 +6965,11 @@ dependencies = [ [[package]] name = "redox_syscall" 
-version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] @@ -6959,7 +7045,7 @@ dependencies = [ [[package]] name = "repository" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "bigquery", "eyre", @@ -6980,7 +7066,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.24.2", "ipnet", "js-sys", @@ -7020,7 +7106,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.4.0", "hyper-rustls 0.27.2", "hyper-util", "ipnet", @@ -7031,8 +7117,8 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.9", - "rustls-native-certs 0.7.0", + "rustls 0.23.10", + "rustls-native-certs 0.7.1", "rustls-pemfile 2.1.2", "rustls-pki-types", "serde", @@ -7048,7 +7134,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.26.2", + "webpki-roots 0.26.3", "winreg 0.52.0", ] @@ -7064,35 +7150,27 @@ dependencies = [ [[package]] name = "reth" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "ahash", "alloy-rlp", "aquamarine", - "assert_matches", "backon", - "boyer-moore-magiclen", "clap", - "comfy-table", "confy", - "crossterm", "discv5", "eyre", "fdlimit", "futures", - "human_bytes", - "itertools 0.12.1", - "jsonrpsee", + "itertools 0.13.0", "libc", "metrics-process", - "proptest", - "rand 0.8.5", - "ratatui", - "rayon", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-blockchain-tree", + "reth-chainspec", + "reth-cli-commands", "reth-cli-runner", + "reth-cli-util", "reth-config", "reth-consensus", "reth-consensus-common", @@ -7100,39 +7178,43 @@ dependencies = [ "reth-db-api", "reth-db-common", "reth-discv4", - 
"reth-discv5", "reth-downloaders", + "reth-engine-util", "reth-errors", "reth-ethereum-payload-builder", "reth-evm", + "reth-execution-types", "reth-exex", "reth-fs-util", - "reth-net-common", "reth-network", "reth-network-api", "reth-network-p2p", - "reth-nippy-jar", "reth-node-api", "reth-node-builder", "reth-node-core", "reth-node-ethereum", "reth-node-events", "reth-node-optimism", + "reth-optimism-cli", "reth-optimism-primitives", "reth-payload-builder", "reth-payload-primitives", "reth-payload-validator", "reth-primitives", "reth-provider", - "reth-prune-types", + "reth-prune", "reth-revm", "reth-rpc", "reth-rpc-api", "reth-rpc-builder", + "reth-rpc-eth-types", + "reth-rpc-server-types", "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", + "reth-stages-api", "reth-static-file", + "reth-static-file-types", "reth-tasks", "reth-tracing", "reth-transaction-pool", @@ -7149,14 +7231,16 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "futures-util", "reth-beacon-consensus", + "reth-chainspec", "reth-consensus", "reth-engine-primitives", "reth-evm", "reth-execution-errors", + "reth-execution-types", "reth-network-p2p", "reth-network-peers", "reth-primitives", @@ -7173,12 +7257,13 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "futures-core", "futures-util", "metrics", + "reth-chainspec", "reth-metrics", "reth-payload-builder", "reth-payload-primitives", @@ -7194,69 +7279,115 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-genesis", "assert_matches", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "metrics", "reth-blockchain-tree", "reth-blockchain-tree-api", + "reth-chainspec", "reth-config", "reth-consensus", "reth-db", - "reth-db-api", - "reth-downloaders", - "reth-engine-primitives", - 
"reth-errors", - "reth-ethereum-consensus", - "reth-ethereum-engine-primitives", - "reth-evm", - "reth-evm-ethereum", - "reth-exex-types", - "reth-metrics", - "reth-network-p2p", - "reth-payload-builder", - "reth-payload-primitives", - "reth-payload-validator", + "reth-db-api", + "reth-downloaders", + "reth-engine-primitives", + "reth-errors", + "reth-ethereum-consensus", + "reth-ethereum-engine-primitives", + "reth-evm", + "reth-evm-ethereum", + "reth-exex-types", + "reth-metrics", + "reth-network-p2p", + "reth-payload-builder", + "reth-payload-primitives", + "reth-payload-validator", + "reth-primitives", + "reth-provider", + "reth-prune", + "reth-prune-types", + "reth-revm", + "reth-rpc", + "reth-rpc-types", + "reth-rpc-types-compat", + "reth-stages", + "reth-stages-api", + "reth-static-file", + "reth-tasks", + "reth-testing-utils", + "reth-tokio-util", + "reth-tracing", + "schnellru", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "reth-bench" +version = "1.0.1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types-engine", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", + "async-trait", + "clap", + "csv", + "eyre", + "futures", + "libc", + "reqwest 0.12.5", + "reth-cli-runner", + "reth-db", + "reth-node-api", + "reth-node-core", "reth-primitives", "reth-provider", - "reth-prune", - "reth-prune-types", - "reth-revm", - "reth-rpc", "reth-rpc-types", "reth-rpc-types-compat", - "reth-stages", - "reth-stages-api", - "reth-static-file", - "reth-tasks", - "reth-testing-utils", - "reth-tokio-util", "reth-tracing", - "schnellru", + "serde", + "serde_json", "thiserror", + "tikv-jemallocator", "tokio", - "tokio-stream", + "tokio-util", + "tower", "tracing", ] [[package]] name = "reth-blockchain-tree" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-genesis", 
"aquamarine", "assert_matches", "linked_hash_set", "metrics", "parking_lot 0.12.3", "reth-blockchain-tree-api", + "reth-chainspec", "reth-consensus", "reth-db", "reth-db-api", "reth-evm", "reth-evm-ethereum", "reth-execution-errors", + "reth-execution-types", "reth-metrics", "reth-network", "reth-primitives", @@ -7274,7 +7405,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -7283,28 +7414,125 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-chainspec" +version = "1.0.1" +dependencies = [ + "alloy-chains", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "derive_more", + "nybbles", + "once_cell", + "op-alloy-rpc-types", + "rand 0.8.5", + "reth-ethereum-forks", + "reth-network-peers", + "reth-primitives-traits", + "reth-rpc-types", + "reth-trie-common", + "serde", + "serde_json", +] + +[[package]] +name = "reth-cli" +version = "1.0.1" +dependencies = [ + "clap", + "eyre", + "reth-chainspec", + "reth-cli-runner", +] + +[[package]] +name = "reth-cli-commands" +version = "1.0.1" +dependencies = [ + "ahash", + "arbitrary", + "backon", + "clap", + "comfy-table", + "confy", + "crossterm", + "eyre", + "fdlimit", + "human_bytes", + "itertools 0.13.0", + "metrics-process", + "proptest", + "proptest-arbitrary-interop", + "ratatui", + "reth-beacon-consensus", + "reth-chainspec", + "reth-cli-runner", + "reth-cli-util", + "reth-config", + "reth-consensus", + "reth-db", + "reth-db-api", + "reth-db-common", + "reth-downloaders", + "reth-evm", + "reth-exex", + "reth-fs-util", + "reth-network", + "reth-network-p2p", + "reth-node-core", + "reth-primitives", + "reth-provider", + "reth-prune", + "reth-stages", + "reth-static-file", + "reth-static-file-types", + "reth-trie", + "serde", + "serde_json", + "tokio", + "toml", + "tracing", +] + [[package]] name = "reth-cli-runner" -version = "0.2.0-beta.9" +version 
= "1.0.1" dependencies = [ "reth-tasks", "tokio", "tracing", ] +[[package]] +name = "reth-cli-util" +version = "1.0.1" +dependencies = [ + "eyre", + "proptest", + "reth-fs-util", + "reth-network", + "reth-primitives", + "secp256k1", + "thiserror", +] + [[package]] name = "reth-codecs" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", + "alloy-eips", + "alloy-genesis", "alloy-primitives", "arbitrary", "bytes", "modular-bitfield", "proptest", - "proptest-derive", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "reth-codecs-derive", "serde", "serde_json", @@ -7313,23 +7541,24 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "convert_case 0.6.0", "proc-macro2", "quote", "similar-asserts", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] name = "reth-config" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "confy", "humantime-serde", - "reth-network", + "reth-network-types", "reth-prune-types", + "reth-stages-types", "serde", "tempfile", "toml", @@ -7337,19 +7566,20 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "auto_impl", "reth-primitives", - "thiserror", + "thiserror-no-std", ] [[package]] name = "reth-consensus-common" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "mockall", "rand 0.8.5", + "reth-chainspec", "reth-consensus", "reth-primitives", "reth-storage-api", @@ -7357,10 +7587,10 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-eips 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", + "alloy-eips", "alloy-provider", "auto_impl", "eyre", @@ -7379,7 +7609,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "arbitrary", "assert_matches", @@ -7400,12 +7630,13 @@ dependencies = [ "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-errors", "reth-tracing", - "reth-trie-types", - "rustc-hash", + "reth-trie-common", + "rustc-hash 2.0.0", "serde", "serde_json", "strum", @@ -7417,7 +7648,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "arbitrary", "assert_matches", @@ -7431,14 +7662,16 @@ dependencies = [ "paste", "pprof", "proptest", - "proptest-derive", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-errors", - "reth-trie-types", + "reth-trie-common", "serde", "serde_json", "test-fuzz", @@ -7446,15 +7679,20 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-genesis", + "boyer-moore-magiclen", "eyre", + "reth-chainspec", "reth-codecs", "reth-config", "reth-db", "reth-db-api", "reth-etl", + "reth-fs-util", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-stages-types", "reth-trie", @@ -7466,8 +7704,9 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-primitives", "alloy-rlp", "assert_matches", "discv5", @@ -7475,13 +7714,14 @@ dependencies = [ "generic-array", "parking_lot 0.12.3", "rand 0.8.5", - "reth-net-common", + "reth-chainspec", + "reth-ethereum-forks", + "reth-net-banlist", "reth-net-nat", "reth-network-peers", - "reth-primitives", "reth-tracing", 
"schnellru", - "secp256k1 0.28.2", + "secp256k1", "serde", "thiserror", "tokio", @@ -7491,23 +7731,25 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-primitives", "alloy-rlp", "derive_more", "discv5", "enr", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "libp2p-identity", "metrics", "multiaddr", "rand 0.8.5", + "reth-chainspec", + "reth-ethereum-forks", "reth-metrics", "reth-network-peers", - "reth-primitives", "reth-tracing", - "secp256k1 0.28.2", + "secp256k1", "thiserror", "tokio", "tracing", @@ -7515,20 +7757,23 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-chains", + "alloy-primitives", "alloy-rlp", "data-encoding", "enr", "linked_hash_set", "parking_lot 0.12.3", "rand 0.8.5", - "reth-net-common", + "reth-chainspec", + "reth-ethereum-forks", "reth-network-peers", - "reth-primitives", + "reth-tokio-util", "reth-tracing", "schnellru", - "secp256k1 0.28.2", + "secp256k1", "serde", "serde_with", "thiserror", @@ -7540,17 +7785,18 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "assert_matches", "futures", "futures-util", - "itertools 0.12.1", + "itertools 0.13.0", "metrics", "pin-project", "rand 0.8.5", "rayon", + "reth-chainspec", "reth-config", "reth-consensus", "reth-db", @@ -7560,6 +7806,7 @@ dependencies = [ "reth-network-peers", "reth-primitives", "reth-provider", + "reth-storage-api", "reth-tasks", "reth-testing-utils", "reth-tracing", @@ -7573,18 +7820,20 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-rpc-types", "alloy-signer", - 
"alloy-signer-wallet", + "alloy-signer-local", "eyre", "futures-util", "jsonrpsee", "reth", + "reth-chainspec", "reth-db", + "reth-network-peers", "reth-node-builder", "reth-payload-builder", "reth-primitives", @@ -7602,9 +7851,10 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "aes 0.8.4", + "alloy-primitives", "alloy-rlp", "block-padding", "byteorder", @@ -7612,16 +7862,13 @@ dependencies = [ "concat-kdf", "ctr 0.9.2", "digest 0.10.7", - "educe", "futures", "generic-array", "hmac 0.12.1", "pin-project", "rand 0.8.5", - "reth-net-common", "reth-network-peers", - "reth-primitives", - "secp256k1 0.28.2", + "secp256k1", "sha2 0.10.8", "sha3", "thiserror", @@ -7634,16 +7881,78 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "0.2.0-beta.9" +version = "1.0.1" +dependencies = [ + "reth-chainspec", + "reth-payload-primitives", + "serde", +] + +[[package]] +name = "reth-engine-tree" +version = "1.0.1" dependencies = [ + "aquamarine", + "assert_matches", + "futures", + "metrics", + "parking_lot 0.12.3", + "reth-beacon-consensus", + "reth-blockchain-tree", + "reth-blockchain-tree-api", + "reth-chainspec", + "reth-consensus", + "reth-db", + "reth-db-api", + "reth-engine-primitives", + "reth-errors", + "reth-ethereum-consensus", + "reth-evm", + "reth-metrics", + "reth-network-p2p", + "reth-payload-builder", "reth-payload-primitives", + "reth-payload-validator", "reth-primitives", + "reth-provider", + "reth-prune", + "reth-prune-types", + "reth-revm", + "reth-rpc-types", + "reth-stages", + "reth-stages-api", + "reth-static-file", + "reth-tasks", + "reth-tokio-util", + "reth-tracing", + "reth-trie", + "revm", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "reth-engine-util" +version = "1.0.1" +dependencies = [ + "eyre", + "futures", + "pin-project", + "reth-beacon-consensus", + "reth-engine-primitives", + "reth-fs-util", + "reth-rpc", + "reth-rpc-types", "serde", + 
"serde_json", + "tokio-util", + "tracing", ] [[package]] name = "reth-errors" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7655,7 +7964,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "arbitrary", @@ -7665,18 +7974,18 @@ dependencies = [ "futures", "pin-project", "proptest", - "proptest-derive", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", + "reth-chainspec", "reth-codecs", - "reth-discv4", "reth-ecies", "reth-eth-wire-types", "reth-metrics", - "reth-net-common", "reth-network-peers", "reth-primitives", "reth-tracing", - "secp256k1 0.28.2", + "secp256k1", "serde", "snap", "test-fuzz", @@ -7689,42 +7998,66 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-chains", + "alloy-genesis", "alloy-rlp", "arbitrary", - "async-stream", "bytes", "derive_more", "proptest", - "proptest-derive", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", + "reth-chainspec", "reth-codecs-derive", - "reth-net-common", "reth-primitives", - "reth-tracing", "serde", - "test-fuzz", "thiserror", - "tokio-util", ] +[[package]] +name = "reth-ethereum-cli" +version = "1.0.1" + [[package]] name = "reth-ethereum-consensus" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "reth-chainspec", "reth-consensus", "reth-consensus-common", "reth-primitives", "tracing", ] +[[package]] +name = "reth-ethereum-engine" +version = "1.0.1" +dependencies = [ + "futures", + "pin-project", + "reth-beacon-consensus", + "reth-chainspec", + "reth-db-api", + "reth-engine-tree", + "reth-ethereum-engine-primitives", + "reth-network-p2p", + "reth-stages-api", + "reth-tasks", + "tokio", + "tokio-stream", +] + [[package]] name = "reth-ethereum-engine-primitives" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", 
+ "reth-chainspec", "reth-engine-primitives", + "reth-evm-ethereum", "reth-payload-primitives", "reth-primitives", "reth-rpc-types", @@ -7737,27 +8070,32 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-chains", "alloy-primitives", "alloy-rlp", "arbitrary", + "auto_impl", "crc", + "dyn-clone", + "once_cell", "proptest", - "proptest-derive", + "proptest-derive 0.5.0", + "rustc-hash 2.0.0", "serde", - "thiserror", + "thiserror-no-std", ] [[package]] name = "reth-ethereum-payload-builder" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "reth-basic-payload-builder", "reth-errors", "reth-evm", "reth-evm-ethereum", + "reth-execution-types", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7769,21 +8107,23 @@ dependencies = [ [[package]] name = "reth-etl" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-primitives", "rayon", "reth-db-api", - "reth-primitives", "tempfile", ] [[package]] name = "reth-evm" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-eips", "auto_impl", "futures-util", "parking_lot 0.12.3", + "reth-chainspec", "reth-execution-errors", "reth-execution-types", "reth-primitives", @@ -7795,11 +8135,13 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-eips", "alloy-sol-types", + "reth-chainspec", "reth-ethereum-consensus", + "reth-ethereum-forks", "reth-evm", "reth-execution-types", "reth-primitives", @@ -7807,20 +8149,22 @@ dependencies = [ "reth-revm", "reth-testing-utils", "revm-primitives", - "secp256k1 0.28.2", + "secp256k1", "serde_json", ] [[package]] name = "reth-evm-optimism" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "reth-chainspec", "reth-consensus-common", + "reth-ethereum-forks", "reth-evm", "reth-execution-errors", + 
"reth-execution-types", "reth-optimism-consensus", "reth-primitives", - "reth-provider", "reth-prune-types", "reth-revm", "revm", @@ -7831,34 +8175,44 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-eips", + "alloy-primitives", "reth-consensus", - "reth-primitives", "reth-prune-types", "reth-storage-errors", - "thiserror", + "revm-primitives", + "thiserror-no-std", ] [[package]] name = "reth-execution-types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-eips", "alloy-primitives", + "reth-chainspec", "reth-execution-errors", "reth-primitives", "reth-trie", "revm", + "serde", ] [[package]] name = "reth-exex" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "eyre", "metrics", + "reth-blockchain-tree", + "reth-chainspec", "reth-config", + "reth-db-api", + "reth-db-common", + "reth-evm", + "reth-evm-ethereum", "reth-exex-types", "reth-metrics", "reth-network", @@ -7866,25 +8220,35 @@ dependencies = [ "reth-node-core", "reth-payload-builder", "reth-primitives", + "reth-primitives-traits", "reth-provider", + "reth-prune-types", + "reth-revm", + "reth-stages-api", "reth-tasks", + "reth-testing-utils", "reth-tracing", + "secp256k1", + "serde", "tokio", "tokio-util", ] [[package]] name = "reth-exex-test-utils" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "eyre", "futures-util", "rand 0.8.5", "reth-blockchain-tree", + "reth-chainspec", "reth-config", + "reth-consensus", "reth-db", "reth-db-common", "reth-evm", + "reth-execution-types", "reth-exex", "reth-network", "reth-node-api", @@ -7902,35 +8266,31 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-exexed" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "bigquery", "exex-etl", "eyre", 
- "futures", "lambda", "repository", "reth", "reth-exex", - "reth-node-api", "reth-node-ethereum", "reth-tracing", - "serde", "serde_json", - "tokio", "types", ] [[package]] name = "reth-fs-util" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "serde_json", "thiserror", @@ -7938,7 +8298,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "async-trait", "bytes", @@ -7960,15 +8320,14 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "byteorder", "criterion", "dashmap", "derive_more", "indexmap 2.2.6", - "libc", "parking_lot 0.12.3", "pprof", "rand 0.8.5", @@ -7981,16 +8340,15 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "bindgen", "cc", - "libc", ] [[package]] name = "reth-metrics" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "futures", "metrics", @@ -8001,7 +8359,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "metrics", "once_cell", @@ -8009,21 +8367,20 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.66", + "syn 2.0.69", "trybuild", ] [[package]] -name = "reth-net-common" -version = "0.2.0-beta.9" +name = "reth-net-banlist" +version = "1.0.1" dependencies = [ "alloy-primitives", - "tokio", ] [[package]] name = "reth-net-nat" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "futures-util", "reqwest 0.12.5", @@ -8035,7 +8392,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-node-bindings", "alloy-provider", @@ -8046,36 +8403,39 @@ dependencies = [ "derive_more", "discv5", "enr", - "fnv", "futures", "humantime-serde", - "itertools 0.12.1", + "itertools 0.13.0", "metrics", "parking_lot 0.12.3", "pin-project", "pprof", "rand 
0.8.5", + "reth-chainspec", "reth-consensus", "reth-discv4", "reth-discv5", "reth-dns-discovery", "reth-ecies", "reth-eth-wire", + "reth-fs-util", "reth-metrics", - "reth-net-common", + "reth-net-banlist", "reth-network", "reth-network-api", "reth-network-p2p", "reth-network-peers", + "reth-network-types", "reth-primitives", "reth-provider", - "reth-rpc-types", + "reth-storage-api", "reth-tasks", "reth-tokio-util", "reth-tracing", "reth-transaction-pool", + "rustc-hash 2.0.0", "schnellru", - "secp256k1 0.28.2", + "secp256k1", "serde", "serde_json", "serial_test", @@ -8090,13 +8450,13 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", + "alloy-rpc-types-admin", "enr", "reth-eth-wire", "reth-network-peers", - "reth-rpc-types", "serde", "thiserror", "tokio", @@ -8104,7 +8464,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "auto_impl", "futures", @@ -8122,13 +8482,13 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", "alloy-rlp", "enr", "rand 0.8.5", - "secp256k1 0.28.2", + "secp256k1", "serde_json", "serde_with", "thiserror", @@ -8136,16 +8496,29 @@ dependencies = [ "url", ] +[[package]] +name = "reth-network-types" +version = "1.0.1" +dependencies = [ + "humantime-serde", + "reth-net-banlist", + "reth-network-api", + "reth-network-peers", + "serde", + "serde_json", + "tracing", +] + [[package]] name = "reth-nippy-jar" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "anyhow", "bincode", "cuckoofilter", "derive_more", "lz4_flex", - "memmap2 0.7.1", + "memmap2 0.9.4", "ph", "rand 0.8.5", "reth-fs-util", @@ -8154,12 +8527,12 @@ dependencies = [ "tempfile", "thiserror", "tracing", - "zstd 0.13.1", + "zstd 0.13.2", ] [[package]] name = "reth-node-api" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = 
[ "reth-db-api", "reth-engine-primitives", @@ -8174,12 +8547,11 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "aquamarine", "backon", "confy", - "discv5", "eyre", "fdlimit", "futures", @@ -8187,6 +8559,8 @@ dependencies = [ "reth-auto-seal-consensus", "reth-beacon-consensus", "reth-blockchain-tree", + "reth-chainspec", + "reth-cli-util", "reth-config", "reth-consensus", "reth-consensus-debug-client", @@ -8194,6 +8568,7 @@ dependencies = [ "reth-db-api", "reth-db-common", "reth-downloaders", + "reth-engine-util", "reth-evm", "reth-exex", "reth-network", @@ -8215,19 +8590,21 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", - "secp256k1 0.28.2", + "secp256k1", "tempfile", "tokio", "tokio-stream", + "tracing", ] [[package]] name = "reth-node-core" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-genesis", "alloy-rpc-types-engine", "clap", - "const-str", + "const_format", "derive_more", "dirs-next", "eyre", @@ -8240,18 +8617,17 @@ dependencies = [ "metrics-process", "metrics-util", "once_cell", - "pin-project", "procfs", "proptest", "rand 0.8.5", - "reth-beacon-consensus", + "reth-chainspec", + "reth-cli-util", "reth-config", "reth-consensus-common", "reth-db", "reth-db-api", "reth-discv4", "reth-discv5", - "reth-engine-primitives", "reth-fs-util", "reth-metrics", "reth-net-nat", @@ -8261,8 +8637,9 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune-types", - "reth-rpc", "reth-rpc-api", + "reth-rpc-eth-api", + "reth-rpc-eth-types", "reth-rpc-server-types", "reth-rpc-types", "reth-rpc-types-compat", @@ -8271,14 +8648,11 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", - "secp256k1 0.28.2", - "serde", + "secp256k1", "serde_json", "shellexpand", - "thiserror", "tikv-jemalloc-ctl", "tokio", - "tokio-util", "tower", "tracing", "vergen", @@ -8286,13 +8660,19 @@ dependencies = [ [[package]] name = 
"reth-node-ethereum" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-genesis", + "alloy-primitives", "eyre", "futures", "futures-util", "reth", + "reth-auto-seal-consensus", "reth-basic-payload-builder", + "reth-beacon-consensus", + "reth-chainspec", + "reth-consensus", "reth-db", "reth-e2e-test-utils", "reth-ethereum-engine-primitives", @@ -8304,7 +8684,6 @@ dependencies = [ "reth-node-builder", "reth-node-core", "reth-payload-builder", - "reth-primitives", "reth-provider", "reth-tracing", "reth-transaction-pool", @@ -8314,8 +8693,9 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-rpc-types-engine", "futures", "humantime", "pin-project", @@ -8324,9 +8704,9 @@ dependencies = [ "reth-network", "reth-network-api", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", - "reth-rpc-types", "reth-stages", "reth-static-file", "tokio", @@ -8335,18 +8715,23 @@ dependencies = [ [[package]] name = "reth-node-optimism" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-genesis", "alloy-primitives", "async-trait", "clap", "eyre", "jsonrpsee", + "jsonrpsee-types", "parking_lot 0.12.3", "reqwest 0.12.5", "reth", + "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", + "reth-chainspec", + "reth-consensus", "reth-db", "reth-discv5", "reth-e2e-test-utils", @@ -8355,12 +8740,15 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", + "reth-optimism-consensus", "reth-optimism-payload-builder", "reth-payload-builder", "reth-primitives", "reth-provider", "reth-revm", "reth-rpc", + "reth-rpc-eth-api", + "reth-rpc-eth-types", "reth-rpc-types", "reth-rpc-types-compat", "reth-tracing", @@ -8373,10 +8761,43 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-optimism-cli" +version = "1.0.1" +dependencies = [ + "alloy-primitives", + "clap", + "eyre", + "futures-util", + "reth-cli-commands", + 
"reth-config", + "reth-consensus", + "reth-db", + "reth-db-api", + "reth-downloaders", + "reth-errors", + "reth-evm-optimism", + "reth-execution-types", + "reth-network-p2p", + "reth-node-core", + "reth-node-events", + "reth-optimism-primitives", + "reth-primitives", + "reth-provider", + "reth-prune", + "reth-stages", + "reth-stages-types", + "reth-static-file", + "reth-static-file-types", + "tokio", + "tracing", +] + [[package]] name = "reth-optimism-consensus" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "reth-chainspec", "reth-consensus", "reth-consensus-common", "reth-primitives", @@ -8385,12 +8806,14 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", + "reth-chainspec", "reth-evm", "reth-evm-optimism", + "reth-execution-types", "reth-payload-builder", "reth-payload-primitives", "reth-primitives", @@ -8407,15 +8830,32 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "0.2.0-beta.9" +version = "1.0.1" + +[[package]] +name = "reth-optimism-rpc" +version = "1.0.1" +dependencies = [ + "alloy-primitives", + "parking_lot 0.12.3", + "reth-chainspec", + "reth-errors", + "reth-evm", + "reth-provider", + "reth-rpc-eth-api", + "reth-rpc-eth-types", + "reth-rpc-types", + "reth-tasks", + "reth-transaction-pool", + "tokio", +] [[package]] name = "reth-payload-builder" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "futures-util", "metrics", - "reth-engine-primitives", "reth-errors", "reth-ethereum-engine-primitives", "reth-metrics", @@ -8434,8 +8874,9 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "reth-chainspec", "reth-errors", "reth-primitives", "reth-rpc-types", @@ -8447,8 +8888,9 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + 
"reth-chainspec", "reth-primitives", "reth-rpc-types", "reth-rpc-types-compat", @@ -8456,88 +8898,93 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-chains", - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-eips", + "alloy-genesis", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-rpc-types", "alloy-trie", "arbitrary", "assert_matches", - "byteorder", "bytes", "c-kzg", "criterion", "derive_more", - "hash-db", - "itertools 0.12.1", "modular-bitfield", "nybbles", "once_cell", - "plain_hasher", "pprof", "proptest", - "proptest-derive", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", "rayon", + "reth-chainspec", "reth-codecs", "reth-ethereum-forks", - "reth-network-peers", "reth-primitives-traits", "reth-static-file-types", - "reth-trie-types", - "revm", + "reth-trie-common", "revm-primitives", - "roaring", - "secp256k1 0.28.2", + "secp256k1", "serde", "serde_json", "sucds", "tempfile", "test-fuzz", - "thiserror", + "thiserror-no-std", "toml", "triehash", - "zstd 0.13.1", + "zstd 0.13.2", ] [[package]] name = "reth-primitives-traits" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-consensus", + "alloy-eips", + "alloy-genesis", "alloy-primitives", + "alloy-rlp", + "alloy-rpc-types-eth", "arbitrary", + "byteorder", "bytes", + "derive_more", "modular-bitfield", "proptest", - "proptest-derive", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", + "rand 0.8.5", "reth-codecs", + "revm-primitives", + "roaring", "serde", + 
"serde_json", "test-fuzz", + "thiserror-no-std", ] [[package]] name = "reth-provider" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "alloy-rpc-types-engine", "assert_matches", "auto_impl", "dashmap", - "itertools 0.12.1", + "itertools 0.13.0", "metrics", "parking_lot 0.12.3", "pin-project", "rand 0.8.5", "rayon", "reth-blockchain-tree-api", + "reth-chainspec", "reth-codecs", "reth-db", "reth-db-api", @@ -8565,26 +9012,28 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", "assert_matches", - "itertools 0.12.1", + "itertools 0.13.0", "metrics", "rayon", + "reth-chainspec", "reth-config", "reth-db", "reth-db-api", "reth-errors", "reth-exex-types", "reth-metrics", - "reth-primitives", "reth-provider", "reth-prune-types", "reth-stages", + "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", "reth-tracing", + "rustc-hash 2.0.0", "thiserror", "tokio", "tracing", @@ -8592,7 +9041,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -8601,7 +9050,8 @@ dependencies = [ "derive_more", "modular-bitfield", "proptest", - "proptest-derive", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "reth-codecs", "serde", "serde_json", @@ -8612,10 +9062,10 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-rlp", + "alloy-eips", + "reth-chainspec", "reth-consensus-common", "reth-execution-errors", "reth-primitives", @@ -8629,32 +9079,31 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-dyn-abi", + "alloy-genesis", "alloy-primitives", "alloy-rlp", - "alloy-sol-types", "assert_matches", "async-trait", "derive_more", - "dyn-clone", "futures", 
"http 1.1.0", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.4.0", "jsonrpsee", - "jsonwebtoken 8.3.0", - "metrics", + "jsonrpsee-types", + "jsonwebtoken", "parking_lot 0.12.3", "pin-project", "rand 0.8.5", + "reth-chainspec", "reth-consensus-common", "reth-errors", "reth-evm", "reth-evm-ethereum", "reth-evm-optimism", - "reth-metrics", "reth-network-api", "reth-network-peers", "reth-primitives", @@ -8662,6 +9111,8 @@ dependencies = [ "reth-revm", "reth-rpc-api", "reth-rpc-engine-api", + "reth-rpc-eth-api", + "reth-rpc-eth-types", "reth-rpc-server-types", "reth-rpc-types", "reth-rpc-types-compat", @@ -8671,8 +9122,7 @@ dependencies = [ "revm", "revm-inspectors", "revm-primitives", - "schnellru", - "secp256k1 0.28.2", + "secp256k1", "serde", "serde_json", "tempfile", @@ -8686,12 +9136,13 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "jsonrpsee", "reth-engine-primitives", "reth-network-peers", "reth-primitives", + "reth-rpc-eth-api", "reth-rpc-types", "serde", "serde_json", @@ -8699,12 +9150,13 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "futures", "jsonrpsee", "reth-primitives", "reth-rpc-api", + "reth-rpc-eth-api", "reth-rpc-types", "serde_json", "similar-asserts", @@ -8713,7 +9165,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "clap", "http 1.1.0", @@ -8721,6 +9173,7 @@ dependencies = [ "metrics", "pin-project", "reth-beacon-consensus", + "reth-chainspec", "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-evm", @@ -8728,6 +9181,7 @@ dependencies = [ "reth-ipc", "reth-metrics", "reth-network-api", + "reth-network-peers", "reth-node-core", "reth-payload-builder", "reth-primitives", @@ -8735,6 +9189,8 @@ dependencies = [ "reth-rpc", "reth-rpc-api", "reth-rpc-engine-api", + "reth-rpc-eth-api", + "reth-rpc-eth-types", 
"reth-rpc-layer", "reth-rpc-server-types", "reth-rpc-types", @@ -8754,7 +9210,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "assert_matches", @@ -8763,30 +9219,99 @@ dependencies = [ "jsonrpsee-types", "metrics", "reth-beacon-consensus", + "reth-chainspec", "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-evm", "reth-metrics", - "reth-payload-builder", - "reth-payload-primitives", + "reth-payload-builder", + "reth-payload-primitives", + "reth-primitives", + "reth-provider", + "reth-rpc-api", + "reth-rpc-types", + "reth-rpc-types-compat", + "reth-storage-api", + "reth-tasks", + "reth-testing-utils", + "reth-tokio-util", + "serde", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "reth-rpc-eth-api" +version = "1.0.1" +dependencies = [ + "alloy-dyn-abi", + "async-trait", + "auto_impl", + "dyn-clone", + "futures", + "jsonrpsee", + "parking_lot 0.12.3", + "reth-chainspec", + "reth-errors", + "reth-evm", + "reth-execution-types", + "reth-primitives", + "reth-provider", + "reth-revm", + "reth-rpc-eth-types", + "reth-rpc-server-types", + "reth-rpc-types", + "reth-rpc-types-compat", + "reth-tasks", + "reth-transaction-pool", + "revm", + "revm-inspectors", + "revm-primitives", + "tokio", + "tracing", +] + +[[package]] +name = "reth-rpc-eth-types" +version = "1.0.1" +dependencies = [ + "alloy-sol-types", + "derive_more", + "futures", + "jsonrpsee-core", + "jsonrpsee-types", + "metrics", + "rand 0.8.5", + "reth-chainspec", + "reth-errors", + "reth-evm", + "reth-execution-types", + "reth-metrics", "reth-primitives", "reth-provider", - "reth-rpc-api", + "reth-revm", + "reth-rpc-server-types", "reth-rpc-types", "reth-rpc-types-compat", - "reth-storage-api", "reth-tasks", - "reth-testing-utils", - "reth-tokio-util", + "reth-transaction-pool", + "reth-trie", + "revm", + "revm-inspectors", + "revm-primitives", + "schnellru", "serde", + "serde_json", 
"thiserror", "tokio", + "tokio-stream", "tracing", ] [[package]] name = "reth-rpc-layer" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rpc-types-engine", "assert_matches", @@ -8803,28 +9328,38 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", + "jsonrpsee-core", + "jsonrpsee-types", + "reth-errors", + "reth-network-api", + "reth-primitives", + "reth-rpc-types", "serde", "strum", ] [[package]] name = "reth-rpc-types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-rpc-types", + "alloy-rpc-types-admin", "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-engine", + "alloy-rpc-types-mev", "alloy-rpc-types-trace", + "alloy-rpc-types-txpool", + "alloy-serde", "arbitrary", "bytes", "jsonrpsee-types", "proptest", - "proptest-derive", + "proptest-derive 0.5.0", "rand 0.8.5", "serde", "serde_json", @@ -8833,29 +9368,31 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-rpc-types", "reth-primitives", "reth-rpc-types", + "reth-trie-common", "serde_json", ] [[package]] name = "reth-stages" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "assert_matches", "criterion", "futures-util", - "itertools 0.12.1", + "itertools 0.13.0", "num-traits", "paste", "pprof", "rand 0.8.5", "rayon", + "reth-chainspec", "reth-codecs", "reth-config", "reth-consensus", @@ -8866,10 +9403,12 @@ dependencies = [ "reth-evm", "reth-evm-ethereum", "reth-execution-errors", + "reth-execution-types", "reth-exex", "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-revm", @@ 
-8887,8 +9426,9 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-primitives", "aquamarine", "assert_matches", "auto_impl", @@ -8899,11 +9439,12 @@ dependencies = [ "reth-errors", "reth-metrics", "reth-network-p2p", - "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-stages-types", "reth-static-file", + "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", "thiserror", @@ -8914,35 +9455,38 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", "arbitrary", "bytes", "modular-bitfield", "proptest", - "proptest-derive", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", - "reth-trie-types", + "reth-trie-common", "serde", "test-fuzz", ] [[package]] name = "reth-static-file" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ + "alloy-primitives", "assert_matches", "parking_lot 0.12.3", "rayon", "reth-db", "reth-db-api", "reth-nippy-jar", - "reth-primitives", "reth-provider", "reth-prune-types", "reth-stages", + "reth-stages-types", + "reth-static-file-types", "reth-storage-errors", "reth-testing-utils", "reth-tokio-util", @@ -8952,7 +9496,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", "clap", @@ -8963,9 +9507,10 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "auto_impl", + "reth-chainspec", "reth-db-api", "reth-execution-types", "reth-primitives", @@ -8978,17 +9523,18 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "reth-fs-util", "reth-primitives", - "thiserror", + "thiserror-no-std", ] [[package]] name = "reth-tasks" -version = "0.2.0-beta.9" +version = "1.0.1" 
dependencies = [ + "auto_impl", "dyn-clone", "futures-util", "metrics", @@ -9003,17 +9549,17 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", + "alloy-genesis", "rand 0.8.5", "reth-primitives", - "secp256k1 0.28.2", + "secp256k1", ] [[package]] name = "reth-tokio-util" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "tokio", "tokio-stream", @@ -9022,7 +9568,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "clap", "eyre", @@ -9036,30 +9582,33 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "aquamarine", "assert_matches", "auto_impl", - "bitflags 2.5.0", + "bitflags 2.6.0", "criterion", "futures-util", - "itertools 0.12.1", "metrics", "parking_lot 0.12.3", "paste", "pprof", "proptest", + "proptest-arbitrary-interop", "rand 0.8.5", + "reth-chainspec", "reth-eth-wire-types", + "reth-execution-types", "reth-fs-util", "reth-metrics", "reth-primitives", "reth-provider", "reth-tasks", "reth-tracing", - "rustc-hash", + "revm", + "rustc-hash 2.0.0", "schnellru", "serde", "serde_json", @@ -9073,7 +9622,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "auto_impl", @@ -9082,7 +9631,9 @@ dependencies = [ "metrics", "once_cell", "proptest", + "proptest-arbitrary-interop", "rayon", + "reth-chainspec", "reth-db", "reth-db-api", "reth-execution-errors", @@ -9091,8 +9642,9 @@ dependencies = [ "reth-provider", "reth-stages-types", "reth-storage-errors", - "reth-trie-types", + "reth-trie-common", "revm", + "serde", "serde_json", "similar-asserts", "tokio", @@ -9101,16 +9653,46 @@ dependencies = [ "triehash", ] +[[package]] +name = "reth-trie-common" +version = "1.0.1" +dependencies = [ + 
"alloy-consensus", + "alloy-genesis", + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "arbitrary", + "assert_matches", + "bytes", + "derive_more", + "hash-db", + "itertools 0.13.0", + "nybbles", + "plain_hasher", + "proptest", + "proptest-arbitrary-interop", + "proptest-derive 0.5.0", + "reth-codecs", + "reth-primitives-traits", + "revm-primitives", + "serde", + "serde_json", + "test-fuzz", + "toml", +] + [[package]] name = "reth-trie-parallel" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-rlp", "criterion", "derive_more", - "itertools 0.12.1", + "itertools 0.13.0", "metrics", "proptest", + "proptest-arbitrary-interop", "rand 0.8.5", "rayon", "reth-db", @@ -9126,31 +9708,11 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-trie-types" -version = "0.2.0-beta.9" -dependencies = [ - "alloy-primitives", - "alloy-rlp", - "alloy-trie", - "arbitrary", - "assert_matches", - "bytes", - "derive_more", - "nybbles", - "proptest", - "proptest-derive", - "reth-codecs", - "serde", - "serde_json", - "test-fuzz", - "toml", -] - [[package]] name = "revm" -version = "9.0.0" -source = "git+https://github.com/bluealloy/revm?rev=a28a543#a28a5439b9cfb7494cbd670da10cbedcfe6c5854" +version = "11.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44102920a77b38b0144f4b84dcaa31fe44746e78f53685c2ca0149af5312e048" dependencies = [ "auto_impl", "cfg-if", @@ -9163,12 +9725,12 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.1.0" -source = "git+https://github.com/paradigmxyz/revm-inspectors?rev=5e3058a#5e3058a87caa24df748e090083ef76518d082c10" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "083fe9c20db39ab4d371e9c4d10367408fa3565ad277a4fa1770f7d9314e1b92" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=14ed25d)", - "alloy-rpc-types-trace", + "alloy-rpc-types", "alloy-sol-types", "anstyle", 
"boa_engine", @@ -9181,8 +9743,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "5.0.0" -source = "git+https://github.com/bluealloy/revm?rev=a28a543#a28a5439b9cfb7494cbd670da10cbedcfe6c5854" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2b319602039af3d130f792beba76592e7744bb3c4f2db5179758be33985a16b" dependencies = [ "revm-primitives", "serde", @@ -9190,30 +9753,34 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "7.0.0" -source = "git+https://github.com/bluealloy/revm?rev=a28a543#a28a5439b9cfb7494cbd670da10cbedcfe6c5854" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b441000a0d30e06269f822f42a13fa6bec922e951a84b643818651472c4fe6" dependencies = [ "aurora-engine-modexp", "blst", "c-kzg", + "cfg-if", "k256", "once_cell", "p256", "revm-primitives", "ripemd", - "secp256k1 0.29.0", + "secp256k1", "sha2 0.10.8", "substrate-bn", ] [[package]] name = "revm-primitives" -version = "4.0.0" -source = "git+https://github.com/bluealloy/revm?rev=a28a543#a28a5439b9cfb7494cbd670da10cbedcfe6c5854" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b518f536bacee396eb28a43f0984b25b2cd80f052ba4f2e794d554d711c13f33" dependencies = [ + "alloy-eips", "alloy-primitives", "auto_impl", - "bitflags 2.5.0", + "bitflags 2.6.0", "bitvec", "c-kzg", "cfg-if", @@ -9238,28 +9805,13 @@ dependencies = [ [[package]] name = "rgb" -version = "0.8.37" +version = "0.8.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05aaa8004b64fd573fc9d002f4e632d51ad4f026c2b5ba95fcb6c2f32c2c47d8" +checksum = "1aee83dc281d5a3200d37b299acd13b81066ea126a7f16f0eae70fc9aed241d9" dependencies = [ "bytemuck", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" 
-dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.8" @@ -9270,8 +9822,8 @@ dependencies = [ "cfg-if", "getrandom 0.2.15", "libc", - "spin 0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys 0.52.0", ] @@ -9311,9 +9863,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7699249cc2c7d71939f30868f47e9d7add0bdc030d90ee10bfd16887ff8bb1c8" +checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1" dependencies = [ "bytemuck", "byteorder", @@ -9334,20 +9886,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" -[[package]] -name = "rpc-db" -version = "0.0.0" -dependencies = [ - "eyre", - "futures", - "jsonrpsee", - "reth", - "reth-db", - "reth-db-api", - "reth-node-ethereum", - "tokio", -] - [[package]] name = "ruint" version = "1.12.3" @@ -9385,7 +9923,7 @@ version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink 0.9.1", @@ -9405,6 +9943,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc-hex" version = "2.1.0" @@ -9435,7 +9979,7 @@ version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -9449,7 +9993,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring 0.17.8", + "ring", "rustls-webpki 0.101.7", "sct", ] @@ -9461,24 +10005,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring 0.17.8", + "ring", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.5", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.9" +version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a218f0f6d05669de4eabfb24f31ce802035c952429d037507b4a4a39f0e60c5b" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ "log", "once_cell", - "ring 0.17.8", + "ring", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.5", "subtle", "zeroize", ] @@ -9497,9 +10041,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" dependencies = [ "openssl-probe", "rustls-pemfile 2.1.2", @@ -9535,22 +10079,22 @@ checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-platform-verifier" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5f0d26fa1ce3c790f9590868f0109289a044acb954525f933e2aa3b871c157d" +checksum = "3e3beb939bcd33c269f4bf946cc829fcd336370267c4a927ac0399c84a3151a1" dependencies 
= [ "core-foundation", "core-foundation-sys", "jni", "log", "once_cell", - "rustls 0.23.9", - "rustls-native-certs 0.7.0", + "rustls 0.23.10", + "rustls-native-certs 0.7.1", "rustls-platform-verifier-android", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.5", "security-framework", "security-framework-sys", - "webpki-roots 0.26.2", + "webpki-roots 0.26.3", "winapi", ] @@ -9566,19 +10110,19 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "f9a6fccd794a42c2c105b513a2f62bc3fd8f3ba57a4593677ceb0bd035164d78" dependencies = [ - "ring 0.17.8", + "ring", "rustls-pki-types", - "untrusted 0.9.0", + "untrusted", ] [[package]] @@ -9633,9 +10177,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ad2bbb0ae5100a07b7a6f2ed7ab5fd0045551a4c507989b7a620046ea3efdc" +checksum = "af947d0ca10a2f3e00c7ec1b515b7c83e5cb3fa62d4c11a64301d9eec54440e9" dependencies = [ "sdd", ] @@ -9672,8 +10216,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -9702,17 +10246,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" -dependencies = [ - "rand 0.8.5", - "secp256k1-sys 0.9.2", 
- "serde", -] - [[package]] name = "secp256k1" version = "0.29.0" @@ -9720,16 +10253,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" dependencies = [ "rand 0.8.5", - "secp256k1-sys 0.10.0", -] - -[[package]] -name = "secp256k1-sys" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" -dependencies = [ - "cc", + "secp256k1-sys", + "serde", ] [[package]] @@ -9747,7 +10272,7 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -9812,38 +10337,38 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.14" +version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] 
name = "serde_json" -version = "1.0.117" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ "indexmap 2.2.6", "itoa", @@ -9885,9 +10410,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.1" +version = "3.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" +checksum = "e73139bc5ec2d45e6c5fd85be5a46949c1c39a4c18e56915f5eb4c12f975e377" dependencies = [ "base64 0.22.1", "chrono", @@ -9903,14 +10428,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.1" +version = "3.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" +checksum = "b80d3d6b56b64335c0180e5ffde23b3c5e08c14c585b51a15bd0e95393f46703" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -9935,7 +10460,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -10197,27 +10722,12 @@ dependencies = [ "sha1", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -[[package]] -name = "spinning" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d4f0e86297cad2658d92a707320d87bf4e6ae1050287f51d19b67ef3f153a7b" -dependencies = [ - "lock_api", -] - 
[[package]] name = "spki" version = "0.7.3" @@ -10245,12 +10755,12 @@ dependencies = [ [[package]] name = "stability" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" dependencies = [ "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -10300,9 +10810,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ "strum_macros 0.26.4", ] @@ -10317,7 +10827,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -10330,7 +10840,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -10348,9 +10858,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "sucds" @@ -10398,9 +10908,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.66" +version = "2.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +checksum = "201fcda3845c23e8212cd466bfebf0bd20694490fc0356ae8e428e0824a915a6" dependencies = [ "proc-macro2", "quote", @@ -10409,14 +10919,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.6" +version = "0.7.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d71e19bca02c807c9faa67b5a47673ff231b6e7449b251695188522f1dc44b2" +checksum = "c837dc8852cb7074e46b444afb81783140dab12c58867b49fb3898fbafedf7ea" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -10439,7 +10949,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -10467,7 +10977,6 @@ dependencies = [ "libc", "ntapi", "once_cell", - "rayon", "windows 0.52.0", ] @@ -10566,7 +11075,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -10605,7 +11114,27 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", +] + +[[package]] +name = "thiserror-impl-no-std" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58e6318948b519ba6dc2b442a6d0b904ebfb8d411a3ad3e07843615a72249758" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "thiserror-no-std" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3ad459d94dd517257cc96add8a43190ee620011bb6e6cdc82dafd97dfafafea" +dependencies = [ + "thiserror-impl-no-std", ] [[package]] @@ -10723,9 +11252,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "ce6b6a2fb3a985e99cebfaefa9faa3024743da73304ca1c683a36429613d3d22" dependencies = [ "tinyvec_macros", ] @@ -10736,12 +11265,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" -[[package]] -name = "to_method" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c4ceeeca15c8384bbc3e011dbd8fccb7f068a440b752b7d9b32ceb0ca0e2e8" - [[package]] name = "tokio" version = "1.38.0" @@ -10769,7 +11292,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -10799,7 +11322,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.9", + "rustls 0.23.10", "rustls-pki-types", "tokio", ] @@ -10818,18 +11341,18 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.23.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "becd34a233e7e31a3dbf7c7241b38320f57393dcae8e7324b0167d21b8e320b0" +checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tungstenite", - "webpki-roots 0.26.2", + "webpki-roots 0.26.3", ] [[package]] @@ -10921,7 +11444,7 @@ checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "async-compression", "base64 0.21.7", - "bitflags 2.5.0", + "bitflags 2.6.0", "bytes", "futures-core", "futures-util", @@ -10988,7 +11511,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -11140,9 +11663,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "33a5f13f11071020bb12de7a16b925d2d58636175c20c11dc5f96cb64bb6c9b3" +checksum = "5b1e5645f2ee8025c2f1d75e1138f2dd034d74e6ba54620f3c569ba2a2a1ea06" dependencies = [ "glob", "serde", @@ -11165,23 +11688,13 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-pki-types", "sha1", "thiserror", "utf-8", ] -[[package]] -name = "txpool-tracing" -version = "0.0.0" -dependencies = [ - "clap", - "futures-util", - "reth", - "reth-node-ethereum", -] - [[package]] name = "typenum" version = "1.17.0" @@ -11190,7 +11703,7 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "types" -version = "0.2.0-beta.9" +version = "1.0.1" dependencies = [ "alloy-primitives", "reth", @@ -11274,6 +11787,12 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + [[package]] name = "universal-hash" version = "0.4.0" @@ -11296,12 +11815,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" @@ -11310,12 +11823,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" +checksum = 
"22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna 1.0.0", + "idna 0.5.0", "percent-encoding", "serde", ] @@ -11346,9 +11859,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.8.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" dependencies = [ "getrandom 0.2.15", ] @@ -11470,7 +11983,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", "wasm-bindgen-shared", ] @@ -11504,7 +12017,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11538,6 +12051,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" version = "0.25.4" @@ -11546,25 +12069,13 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c452ad30530b54a4d8e71952716a212b08efd0f3562baa66c29a618b07da7c3" +checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" dependencies = [ "rustls-pki-types", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - 
"once_cell", - "rustix", -] - [[package]] name = "widestring" version = "1.1.0" @@ -11609,17 +12120,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core 0.52.0", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] name = "windows" -version = "0.56.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1de69df01bdf1ead2f4ac895dc77c9351aefff65b2f3db429a343f9cbf05e132" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ - "windows-core 0.56.0", - "windows-targets 0.52.5", + "windows-core 0.57.0", + "windows-targets 0.52.6", ] [[package]] @@ -11628,41 +12139,41 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.56.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4698e52ed2d08f8658ab0c39512a7c00ee5fe2688c65f8c0a4f06750d729f2a6" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ "windows-implement", "windows-interface", "windows-result", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] name = "windows-implement" -version = "0.56.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6fc35f58ecd95a9b71c4f2329b911016e6bec66b3f2e6a4aad86bd2e99e2f9b" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] name = "windows-interface" -version = "0.56.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "08990546bf4edef8f431fa6326e032865f27138718c587dc21bc0265bbcb57cc" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -11671,7 +12182,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -11689,7 +12200,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -11709,18 +12220,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -11731,9 +12242,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -11743,9 +12254,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -11755,15 +12266,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -11773,9 +12284,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -11785,9 +12296,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -11797,9 +12308,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -11809,9 +12320,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -11884,7 +12395,7 @@ dependencies = [ [[package]] name = "wvm-exexed" -version = "0.1.0" +version = "1.0.0" dependencies = [ "alloy-primitives", "bigquery", @@ -11952,7 +12463,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", "synstructure", ] @@ -11967,7 +12478,7 @@ dependencies = [ "base64 0.21.7", "futures", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.24.2", "itertools 0.12.1", "log", @@ -11994,7 +12505,7 @@ dependencies = [ "base64 0.21.7", "futures", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.25.0", "itertools 0.12.1", "log", @@ -12012,22 +12523,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -12047,7 +12558,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", "synstructure", ] @@ -12068,14 +12579,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] name = "zerovec" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" dependencies = [ "yoke", "zerofrom", @@ -12084,13 +12595,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.69", ] [[package]] @@ -12104,11 +12615,11 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" +checksum = 
"fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ - "zstd-safe 7.1.0", + "zstd-safe 7.2.0", ] [[package]] @@ -12123,18 +12634,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.1.0" +version = "7.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" +checksum = "fa556e971e7b568dc775c136fc9de8c779b1c2fc3a63defaafadffdbd3181afa" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.10+zstd.1.5.6" +version = "2.0.12+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" +checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 4a95dd007c75..6fe750345dab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace.package] -version = "0.2.0-beta.9" +version = "1.0.1" edition = "2021" -rust-version = "1.76" +rust-version = "1.79" license = "MIT OR Apache-2.0" homepage = "https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" @@ -9,10 +9,15 @@ exclude = [".github/"] [workspace] members = [ + "bin/reth-bench/", "bin/reth/", "crates/blockchain-tree/", "crates/blockchain-tree-api/", + "crates/chainspec/", + "crates/cli/cli/", + "crates/cli/commands/", "crates/cli/runner/", + "crates/cli/util/", "crates/config/", "crates/consensus/auto-seal/", "crates/consensus/beacon/", @@ -21,10 +26,14 @@ members = [ "crates/consensus/debug-client/", "crates/ethereum-forks/", "crates/e2e-test-utils/", - "crates/engine-primitives/", + "crates/engine/primitives/", + "crates/engine/tree/", + "crates/engine/util/", "crates/errors/", "crates/ethereum-forks/", + "crates/ethereum/cli/", "crates/ethereum/consensus/", + "crates/ethereum/engine/", "crates/ethereum/engine-primitives/", 
"crates/ethereum/evm", "crates/ethereum/node", @@ -38,7 +47,7 @@ members = [ "crates/exex/types/", "crates/metrics/", "crates/metrics/metrics-derive/", - "crates/net/common/", + "crates/net/banlist/", "crates/net/discv4/", "crates/net/discv5/", "crates/net/dns/", @@ -48,18 +57,21 @@ members = [ "crates/net/eth-wire/", "crates/net/nat/", "crates/net/network-api/", + "crates/net/network-types/", "crates/net/network/", "crates/net/p2p/", "crates/net/peers/", - "crates/node-core/", + "crates/node/core/", "crates/node/api/", "crates/node/builder/", "crates/node/events/", + "crates/optimism/cli", "crates/optimism/consensus", "crates/optimism/evm/", "crates/optimism/node/", "crates/optimism/payload/", "crates/optimism/primitives/", + "crates/optimism/rpc/", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/primitives/", @@ -73,8 +85,11 @@ members = [ "crates/rpc/rpc-api/", "crates/rpc/rpc-builder/", "crates/rpc/rpc-engine-api/", + "crates/rpc/rpc-eth-api/", + "crates/rpc/rpc-eth-types/", "crates/rpc/rpc-layer", "crates/rpc/rpc-testing-util/", + "crates/rpc/rpc-server-types/", "crates/rpc/rpc-types-compat/", "crates/rpc/rpc-types/", "crates/rpc/rpc/", @@ -98,15 +113,16 @@ members = [ "crates/tokio-util/", "crates/tracing/", "crates/transaction-pool/", + "crates/trie/common", "crates/trie/parallel/", "crates/trie/trie", - "crates/trie/types", "examples/beacon-api-sidecar-fetcher/", "examples/beacon-api-sse/", "examples/bsc-p2p", "examples/custom-dev-node/", "examples/custom-engine-types/", "examples/custom-evm/", + "examples/stateful-precompile/", "examples/custom-inspector/", "examples/custom-node-components/", "examples/custom-payload-builder/", @@ -120,6 +136,9 @@ members = [ "examples/polygon-p2p/", "examples/rpc-db/", "examples/txpool-tracing/", + "examples/custom-rlpx-subprotocol", + "examples/exex/minimal/", + "examples/exex/op-bridge/", "testing/ef-tests/", "testing/testing-utils/", "wvm-apps/wvm-exexed/", @@ -198,6 +217,7 @@ zero_sized_map_values 
= "warn" single_char_pattern = "warn" needless_continue = "warn" enum_glob_use = "warn" +iter_without_into_iter = "warn" # These are nursery lints which have findings. Allow them for now. Some are not # quite mature enough for use in our codebase and some we don't really want. @@ -228,30 +248,43 @@ opt-level = 3 lto = "thin" [profile.release] +opt-level = 3 lto = "thin" -strip = "debuginfo" +debug = "line-tables-only" +strip = true +panic = "unwind" +codegen-units = 16 -# Like release, but with full debug symbols. Useful for e.g. `perf`. -[profile.debug-fast] +# Use the `--profile profiling` flag to show symbols in release mode. +# e.g. `cargo build --profile profiling` +[profile.profiling] inherits = "release" -strip = "none" -debug = true +debug = 2 +strip = false + +# Make sure debug symbols are in the bench profile +[profile.bench] +inherits = "profiling" [profile.maxperf] inherits = "release" lto = "fat" codegen-units = 1 -incremental = false [workspace.dependencies] # reth reth = { path = "bin/reth" } +reth-bench = { path = "bin/reth-bench" } reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } +reth-chainspec = { path = "crates/chainspec" } +reth-cli = { path = "crates/cli/cli" } +reth-cli-commands = { path = "crates/cli/commands" } reth-cli-runner = { path = "crates/cli/runner" } +reth-cli-util = { path = "crates/cli/util" } reth-codecs = { path = "crates/storage/codecs" } reth-codecs-derive = { path = "crates/storage/codecs/derive" } reth-config = { path = "crates/config" } @@ -267,10 +300,13 @@ reth-dns-discovery = { path = "crates/net/dns" } reth-downloaders = { path = "crates/net/downloaders" } reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-ecies = { path = "crates/net/ecies" } 
-reth-engine-primitives = { path = "crates/engine-primitives" } +reth-engine-primitives = { path = "crates/engine/primitives" } +reth-engine-tree = { path = "crates/engine/tree" } +reth-engine-util = { path = "crates/engine/util" } reth-errors = { path = "crates/errors" } reth-eth-wire = { path = "crates/net/eth-wire" } reth-eth-wire-types = { path = "crates/net/eth-wire-types" } +reth-ethereum-cli = { path = "crates/ethereum/cli" } reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-ethereum-forks = { path = "crates/ethereum-forks" } @@ -290,22 +326,25 @@ reth-libmdbx = { path = "crates/storage/libmdbx-rs" } reth-mdbx-sys = { path = "crates/storage/libmdbx-rs/mdbx-sys" } reth-metrics = { path = "crates/metrics" } reth-metrics-derive = { path = "crates/metrics/metrics-derive" } -reth-net-common = { path = "crates/net/common" } +reth-net-banlist = { path = "crates/net/banlist" } reth-net-nat = { path = "crates/net/nat" } reth-network = { path = "crates/net/network" } reth-network-api = { path = "crates/net/network-api" } +reth-network-types = { path = "crates/net/network-types" } reth-network-peers = { path = "crates/net/peers", default-features = false } reth-network-p2p = { path = "crates/net/p2p" } reth-nippy-jar = { path = "crates/storage/nippy-jar" } reth-node-api = { path = "crates/node/api" } reth-node-builder = { path = "crates/node/builder" } -reth-node-core = { path = "crates/node-core" } +reth-node-core = { path = "crates/node/core" } reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-events = { path = "crates/node/events" } reth-node-optimism = { path = "crates/optimism/node" } +reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-optimism-primitives = { path = "crates/optimism/primitives" } 
+reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } @@ -320,7 +359,9 @@ reth-rpc-api = { path = "crates/rpc/rpc-api" } reth-rpc-api-testing-util = { path = "crates/rpc/rpc-testing-util" } reth-rpc-builder = { path = "crates/rpc/rpc-builder" } reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" } +reth-rpc-eth-api = { path = "crates/rpc/rpc-eth-api" } reth-rpc-layer = { path = "crates/rpc/rpc-layer" } +reth-rpc-eth-types = { path = "crates/rpc/rpc-eth-types" } reth-rpc-server-types = { path = "crates/rpc/rpc-server-types" } reth-rpc-types = { path = "crates/rpc/rpc-types" } reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } @@ -337,19 +378,19 @@ reth-tokio-util = { path = "crates/tokio-util" } reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie/trie" } +reth-trie-common = { path = "crates/trie/common" } reth-trie-parallel = { path = "crates/trie/parallel" } -reth-trie-types = { path = "crates/trie/types" } # revm -revm = { version = "9.0.0", features = [ +revm = { version = "11.0.0", features = [ "std", "secp256k1", "blst", ], default-features = false } -revm-primitives = { version = "4.0.0", features = [ +revm-primitives = { version = "6.0.0", features = [ "std", ], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/revm-inspectors", rev = "5e3058a" } +revm-inspectors = "0.4" # eth alloy-chains = "0.1.15" @@ -358,21 +399,40 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.4" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-rpc-types-trace = { git = 
"https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d", default-features = false, features = [ +alloy-rpc-types = { version = "0.1", default-features = false, features = [ + "eth", +] } +alloy-rpc-types-anvil = { version = "0.1", default-features = false } +alloy-rpc-types-beacon = { version = "0.1", default-features = false } +alloy-rpc-types-admin = { version = "0.1", default-features = false } +alloy-rpc-types-txpool = { version = "0.1", default-features = false } +alloy-serde = { version = "0.1", default-features = false } +alloy-rpc-types-engine = { version = "0.1", default-features = false } +alloy-rpc-types-eth = { version = "0.1", default-features = false } +alloy-rpc-types-mev = { version = "0.1", default-features = false } +alloy-rpc-types-trace = { version = "0.1", default-features = false } +alloy-genesis = { version = "0.1", default-features = false } +alloy-node-bindings = { version = "0.1", default-features = false } +alloy-provider = { version = "0.1", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "14ed25d" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" } +alloy-eips = { version = "0.1", default-features = false } +alloy-signer = { version = "0.1", 
default-features = false } +alloy-signer-local = { version = "0.1", default-features = false } +alloy-network = { version = "0.1", default-features = false } +alloy-consensus = { version = "0.1", default-features = false } +alloy-transport = { version = "0.1" } +alloy-transport-http = { version = "0.1", features = [ + "reqwest-rustls-tls", +], default-features = false } +alloy-transport-ws = { version = "0.1", default-features = false } +alloy-transport-ipc = { version = "0.1", default-features = false } +alloy-pubsub = { version = "0.1", default-features = false } +alloy-json-rpc = { version = "0.1", default-features = false } +alloy-rpc-client = { version = "0.1", default-features = false } + +# op +op-alloy-rpc-types = "0.1" # misc auto_impl = "1" @@ -380,25 +440,28 @@ aquamarine = "0.5" bytes = "1.5" bitflags = "2.4" clap = "4" +const_format = { version = "0.2.32", features = ["rust_1_64"] } dashmap = "5.5" derive_more = "0.99.17" fdlimit = "0.3.0" eyre = "0.6" generic-array = "0.14" +linked_hash_set = "0.1" tracing = "0.1.0" tracing-appender = "0.2" thiserror = "1.0" +thiserror-no-std = { version = "2.0.2", default-features = false } serde_json = "1.0.94" serde = { version = "1.0", default-features = false } serde_with = "3.3.0" humantime = "2.1" humantime-serde = "1.1" rand = "0.8.5" -rustc-hash = "1.1.0" +rustc-hash = "2.0" schnellru = "0.2" strum = "0.26" rayon = "1.7" -itertools = "0.12" +itertools = "0.13" parking_lot = "0.12" modular-bitfield = "0.11.2" once_cell = "1.17" @@ -410,12 +473,13 @@ sha2 = { version = "0.10", default-features = false } paste = "1.0" url = "2.3" backon = "0.4" +boyer-moore-magiclen = "0.2.16" # metrics -metrics = "0.22.0" -metrics-exporter-prometheus = { version = "0.14.0", default-features = false } -metrics-util = "0.16.0" -metrics-process = "2.0.0" +metrics = "0.23.0" +metrics-exporter-prometheus = { version = "0.15.0", default-features = false } +metrics-util = "0.17.0" +metrics-process = "2.1.0" # proc-macros proc-macro2 = 
"1.0" @@ -429,9 +493,10 @@ tokio-util = { version = "0.7.4", features = ["codec"] } # async async-stream = "0.3" async-trait = "0.1.68" -futures = "0.3.26" +futures = "0.3" +futures-util = "0.3" +futures-core = "0.3" pin-project = "1.0.12" -futures-util = "0.3.25" hyper = "1.3" hyper-util = "0.1.5" reqwest = { version = "0.12", default-features = false } @@ -440,7 +505,6 @@ tower-http = "0.5" # p2p discv5 = "0.6.0" -igd-next = "0.14.3" # rpc jsonrpsee = "0.23" @@ -451,17 +515,15 @@ jsonrpsee-http-client = "0.23" # http http = "1.0" http-body = "1.0" +jsonwebtoken = "9" +proptest-arbitrary-interop = "0.1.0" # crypto -secp256k1 = { version = "0.28", default-features = false, features = [ +secp256k1 = { version = "0.29", default-features = false, features = [ "global-context", "recovery", ] } -# TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 -enr = { version = "0.12.0", default-features = false, features = [ - "k256", - "rust-secp256k1", -] } +enr = { version = "0.12.1", default-features = false } # for eip-4844 c-kzg = "1.0.0" @@ -477,13 +539,8 @@ tempfile = "3.8" criterion = "0.5" pprof = "0.13" proptest = "1.4" -proptest-derive = "0.4" +proptest-derive = "0.5" serial_test = "3" similar-asserts = "1.5.0" test-fuzz = "5" - -[patch.crates-io] -revm = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } -revm-interpreter = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } -revm-precompile = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } -revm-primitives = { git = "https://github.com/bluealloy/revm", rev = "a28a543" } +iai-callgrind = "0.11" diff --git a/Dockerfile b/Dockerfile index d2b71d23ac74..17518d8f011c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef WORKDIR /app -LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth +LABEL org.opencontainers.image.source=https://github.com/weaveVM/wvm-reth LABEL 
org.opencontainers.image.licenses="MIT OR Apache-2.0" # Install system dependencies diff --git a/Makefile b/Makefile index de7b496e129a..d42c426671d4 100644 --- a/Makefile +++ b/Makefile @@ -466,11 +466,7 @@ test: make test-doc && \ make test-other-targets -cfg-check: - cargo +nightly -Zcheck-cfg c - pr: - make cfg-check && \ make lint && \ make update-book-cli && \ make test diff --git a/README.md b/README.md index 7862ac03f403..ad98ba8529f3 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,6 @@ | [Developer Docs](./docs) | [Crate Docs](https://reth.rs/docs) -_The project is still work in progress, see the [disclaimer below](#status)._ - [gh-ci]: https://github.com/paradigmxyz/reth/actions/workflows/unit.yml [gh-deny]: https://github.com/paradigmxyz/reth/actions/workflows/deny.yml [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https%3A%2F%2Ftg.sumanjay.workers.dev%2Fparadigm%5Freth @@ -42,7 +40,7 @@ Reth is production ready, and suitable for usage in mission-critical environment More historical context below: * We released 1.0 "production-ready" stable Reth in June 2024. - * Reth completed an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](./Sigma_Prime_Paradigm_Reth_Security_Assessment_Report_v1_0.pdf). + * Reth completed an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](./audit/sigma_prime_audit_v2.pdf). * Revm (the EVM used in Reth) underwent an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon. * We released multiple iterative beta versions, up to [beta.9](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.9) on Monday June 3rd 2024 the last beta release. 
* We released [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4th 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives. @@ -89,7 +87,7 @@ When updating this, also update: - .github/workflows/lint.yml --> -The Minimum Supported Rust Version (MSRV) of this project is [1.76.0](https://blog.rust-lang.org/2024/02/08/Rust-1.76.0.html). +The Minimum Supported Rust Version (MSRV) of this project is [1.79.0](https://blog.rust-lang.org/2024/06/13/Rust-1.79.0.html). See the book for detailed instructions on how to [build from source](https://paradigmxyz.github.io/reth/installation/source.html). diff --git a/Sigma_Prime_Paradigm_Reth_Security_Assessment_Report_v1_0.pdf b/audit/sigma_prime_audit_v2.pdf similarity index 51% rename from Sigma_Prime_Paradigm_Reth_Security_Assessment_Report_v1_0.pdf rename to audit/sigma_prime_audit_v2.pdf index 4b31fb2b4448..50da37d23f46 100644 Binary files a/Sigma_Prime_Paradigm_Reth_Security_Assessment_Report_v1_0.pdf and b/audit/sigma_prime_audit_v2.pdf differ diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml new file mode 100644 index 000000000000..a0bf299f19c2 --- /dev/null +++ b/bin/reth-bench/Cargo.toml @@ -0,0 +1,106 @@ +[package] +name = "reth-bench" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Benchmarking for ethereum nodes" +default-run = "reth-bench" + +[lints] +workspace = true + +[dependencies] +# reth +reth-provider = { workspace = true } +reth-cli-runner.workspace = true +reth-db = { workspace = true, features = ["mdbx"] } +reth-node-core.workspace = true +reth-node-api.workspace = true +reth-rpc-types.workspace = true +reth-rpc-types-compat.workspace = true +reth-primitives = { workspace = true, features = ["alloy-compat"] } 
+reth-tracing.workspace = true + +# alloy +alloy-provider = { workspace = true, features = ["engine-api", "reqwest-rustls-tls"], default-features = false } +alloy-rpc-types-engine.workspace = true +alloy-transport.workspace = true +alloy-transport-http.workspace = true +alloy-transport-ws.workspace = true +alloy-transport-ipc.workspace = true +alloy-pubsub.workspace = true +alloy-json-rpc.workspace = true +alloy-rpc-client.workspace = true +alloy-consensus.workspace = true +alloy-eips.workspace = true + +# reqwest +reqwest = { workspace = true, default-features = false, features = [ + "rustls-tls-native-roots", +] } + +# tower +tower.workspace = true + +# tracing +tracing.workspace = true + +# io +serde.workspace = true +serde_json.workspace = true + +# async +tokio = { workspace = true, features = [ + "sync", + "macros", + "time", + "rt-multi-thread", +] } +tokio-util.workspace = true +futures.workspace = true +async-trait.workspace = true + +# misc +eyre.workspace = true +thiserror.workspace = true +clap = { workspace = true, features = ["derive", "env"] } + +# for writing data +csv = "1.3.0" + +[target.'cfg(unix)'.dependencies] +tikv-jemallocator = { version = "0.5.0", optional = true } +libc = "0.2" + +[dev-dependencies] +reth-tracing.workspace = true + +[features] +default = ["jemalloc"] + +asm-keccak = ["reth-primitives/asm-keccak"] + +jemalloc = ["dep:tikv-jemallocator", "reth-node-core/jemalloc"] +jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] + +min-error-logs = ["tracing/release_max_level_error"] +min-warn-logs = ["tracing/release_max_level_warn"] +min-info-logs = ["tracing/release_max_level_info"] +min-debug-logs = ["tracing/release_max_level_debug"] +min-trace-logs = ["tracing/release_max_level_trace"] + +optimism = [ + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-node-core/optimism", +] + +# no-op feature flag for switching between the `optimism` and default functionality in CI matrices +ethereum = [] + +[[bin]] +name = 
"reth-bench" +path = "src/main.rs" diff --git a/bin/reth-bench/README.md b/bin/reth-bench/README.md new file mode 100644 index 000000000000..fa58d467f3d3 --- /dev/null +++ b/bin/reth-bench/README.md @@ -0,0 +1,65 @@ +# Benchmarking reth live sync with `reth-bench` + +The binary contained in this directory, `reth-bench`, is a tool that can be used to benchmark the performance of the reth live sync. `reth-bench` is a general tool, and can be used for benchmarking node performance, as long as the node supports the engine API. + +### A recap on node synchronization +Reth uses two primary methods for synchronizing the chain: + * Historical sync, which is used to synchronize the chain from genesis to a known finalized block. This involves re-executing the entire chain history. + * Live sync, which is used to synchronize the chain from a finalized block to the current head. This involves processing new blocks as they are produced. + +Benchmarking historical sync for reth is fairly easy, because historical sync is a long-running, deterministic process. +Reth specifically contains the `--debug.tip` argument, which allows for running the historical sync pipeline to a specific block. +However, reth's historical sync applies optimizations that are not always possible when syncing new blocks. + + +Live sync, on the other hand, is a more complex process that is harder to benchmark. It is also more sensitive to network conditions of the CL. +In order to benchmark live sync, we need to simulate a CL in a controlled manner, so reth can use the same code paths it would when syncing new blocks. + +### The `reth-bench` tool +The `reth-bench` tool is designed to benchmark performance of reth live sync. +It can also be used for debugging client spec implementations, as it replays historical blocks by mocking a CL client. +Performance is measured by latency and gas used in a block, as well as the computed gas used per second. 
+As long as the data is representative of real-world load, or closer to worst-case load test, the gas per second gives a rough sense of how much throughput the node would be able to handle. + +## Prerequisites + +If you will be collecting CPU profiles, make sure `reth` is compiled with the `profiling` profile. +Otherwise, running `make maxperf` at the root of the repo should be sufficient for collecting accurate performance metrics. + +## Command Usage + +`reth-bench` contains different commands to benchmark different patterns of engine API calls. +The `reth-bench new-payload-fcu` command is the most representative of ethereum mainnet live sync, alternating between sending `engine_newPayload` calls and `engine_forkchoiceUpdated` calls. + +Below is an overview of how to execute a benchmark: + + 1. **Setup**: Make sure `reth` is running in the background with the proper configuration. This setup involves ensuring the node is at the correct state, setting up profiling tools, and possibly more depending on the purpose of the benchmark's. + + 2. **Run the Benchmark**: + ```bash + reth-bench new-payload-fcu --rpc-url http://:8545 --from --to --jwtsecret + ``` + + Replace ``, ``, ``, and `` with the appropriate values for your testing environment. + Note that this assumes that the benchmark node's engine API is running on `http://127.0.0.1:8545`, which is set as a default value in `reth-bench`. To configure this value, use the `--engine-rpc-url` flag. + + 3. **Observe Outputs**: Upon running the command, `reth-bench` will output benchmark results, showing processing speeds and gas usage, which are crucial for analyzing the node's performance. + + Example output: + ``` + 2024-05-30T00:45:20.806691Z INFO Running benchmark using data from RPC URL: http://:8545 + // ... logs per block + 2024-05-30T00:45:34.203172Z INFO Total Ggas/s: 0.15 total_duration=5.085704882s total_gas_used=741620668.0 + ``` + + 4. 
**Stop and Review**: Once the benchmark completes, terminate the `reth` process and review the logs and performance metrics collected, if any. + 5. **Repeat**. + +## Additional Considerations + +- **RPC Configuration**: The RPC endpoints should be accessible and configured correctly, specifically the RPC endpoint must support `eth_getBlockByNumber` and support fetching full transactions. The benchmark will make one RPC query per block as fast as possible, so ensure the RPC endpoint does not rate limit or block requests after a certain volume. +- **Reproducibility**: Ensure that the node is at the same state before attempting to retry a benchmark. The `new-payload-fcu` command specifically will commit to the database, so the node must be rolled back using `reth stage unwind` to reproducibly retry benchmarks. +- **Profiling tools**: If you are collecting CPU profiles, tools like [`samply`](https://github.com/mstange/samply) and [`perf`](https://perf.wiki.kernel.org/index.php/Main_Page) can be useful for analyzing node performance. +- **Benchmark Data**: `reth-bench` additionally contains a `--benchmark.output` flag, which will output gas used benchmarks across the benchmark range in CSV format. This may be useful for further data analysis. +- **Platform Information**: To ensure accurate and reproducible benchmarking, document the platform details, including hardware specifications, OS version, and any other relevant information before publishing any benchmarks. + diff --git a/bin/reth-bench/src/authenticated_transport.rs b/bin/reth-bench/src/authenticated_transport.rs new file mode 100644 index 000000000000..c946d244de9e --- /dev/null +++ b/bin/reth-bench/src/authenticated_transport.rs @@ -0,0 +1,267 @@ +//! This contains an authenticated rpc transport that can be used to send engine API newPayload +//! requests. 
+ +use std::sync::Arc; + +use alloy_json_rpc::{RequestPacket, ResponsePacket}; +use alloy_pubsub::{PubSubConnect, PubSubFrontend}; +use alloy_rpc_types_engine::{Claims, JwtSecret}; +use alloy_transport::{ + utils::guess_local_url, Authorization, Pbf, TransportConnect, TransportError, + TransportErrorKind, TransportFut, +}; +use alloy_transport_http::{reqwest::Url, Http, ReqwestTransport}; +use alloy_transport_ipc::IpcConnect; +use alloy_transport_ws::WsConnect; +use futures::FutureExt; +use reqwest::header::HeaderValue; +use std::task::{Context, Poll}; +use tokio::sync::RwLock; +use tower::Service; + +/// An enum representing the different transports that can be used to connect to a runtime. +/// Only meant to be used internally by [`AuthenticatedTransport`]. +#[derive(Clone, Debug)] +pub enum InnerTransport { + /// HTTP transport + Http(ReqwestTransport), + /// `WebSocket` transport + Ws(PubSubFrontend), + /// IPC transport + Ipc(PubSubFrontend), +} + +impl InnerTransport { + /// Connects to a transport based on the given URL and JWT. Returns an [`InnerTransport`] and + /// the [`Claims`] generated from the jwt. + async fn connect( + url: Url, + jwt: JwtSecret, + ) -> Result<(Self, Claims), AuthenticatedTransportError> { + match url.scheme() { + "http" | "https" => Self::connect_http(url, jwt), + "ws" | "wss" => Self::connect_ws(url, jwt).await, + "file" => Ok((Self::connect_ipc(url).await?, Claims::default())), + _ => Err(AuthenticatedTransportError::BadScheme(url.scheme().to_string())), + } + } + + /// Connects to an HTTP [`alloy_transport_http::Http`] transport. Returns an [`InnerTransport`] + /// and the [Claims] generated from the jwt. + fn connect_http( + url: Url, + jwt: JwtSecret, + ) -> Result<(Self, Claims), AuthenticatedTransportError> { + let mut client_builder = + reqwest::Client::builder().tls_built_in_root_certs(url.scheme() == "https"); + let mut headers = reqwest::header::HeaderMap::new(); + + // Add the JWT it to the headers if we can decode it. 
+ let (auth, claims) = + build_auth(jwt).map_err(|e| AuthenticatedTransportError::InvalidJwt(e.to_string()))?; + + let mut auth_value: HeaderValue = + HeaderValue::from_str(&auth.to_string()).expect("Header should be valid string"); + auth_value.set_sensitive(true); + + headers.insert(reqwest::header::AUTHORIZATION, auth_value); + client_builder = client_builder.default_headers(headers); + + let client = + client_builder.build().map_err(AuthenticatedTransportError::HttpConstructionError)?; + + let inner = Self::Http(Http::with_client(client, url)); + Ok((inner, claims)) + } + + /// Connects to a `WebSocket` [`alloy_transport_ws::WsConnect`] transport. Returns an + /// [`InnerTransport`] and the [`Claims`] generated from the jwt. + async fn connect_ws( + url: Url, + jwt: JwtSecret, + ) -> Result<(Self, Claims), AuthenticatedTransportError> { + // Add the JWT it to the headers if we can decode it. + let (auth, claims) = + build_auth(jwt).map_err(|e| AuthenticatedTransportError::InvalidJwt(e.to_string()))?; + + let inner = WsConnect { url: url.to_string(), auth: Some(auth) } + .into_service() + .await + .map(Self::Ws) + .map_err(|e| AuthenticatedTransportError::TransportError(e, url.to_string()))?; + + Ok((inner, claims)) + } + + /// Connects to an IPC [`alloy_transport_ipc::IpcConnect`] transport. Returns an + /// [`InnerTransport`]. Does not return any [`Claims`] because IPC does not require them. + async fn connect_ipc(url: Url) -> Result { + // IPC, even for engine, typically does not require auth because it's local + IpcConnect::new(url.to_string()) + .into_service() + .await + .map(InnerTransport::Ipc) + .map_err(|e| AuthenticatedTransportError::TransportError(e, url.to_string())) + } +} + +/// An authenticated transport that can be used to send requests that contain a jwt bearer token. +#[derive(Debug, Clone)] +pub struct AuthenticatedTransport { + /// The inner actual transport used. + /// + /// Also contains the current claims being used. 
This is used to determine whether or not we + /// should create another client. + inner_and_claims: Arc>, + /// The current jwt being used. This is so we can recreate claims. + jwt: JwtSecret, + /// The current URL being used. This is so we can recreate the client if needed. + url: Url, +} + +/// An error that can occur when creating an authenticated transport. +#[derive(Debug, thiserror::Error)] +pub enum AuthenticatedTransportError { + /// The URL is invalid. + #[error("The URL is invalid")] + InvalidUrl, + /// Failed to lock transport + #[error("Failed to lock transport")] + LockFailed, + /// The JWT is invalid. + #[error("The JWT is invalid: {0}")] + InvalidJwt(String), + /// The transport failed to connect. + #[error("The transport failed to connect to {1}, transport error: {0}")] + TransportError(TransportError, String), + /// The http client could not be built. + #[error("The http client could not be built")] + HttpConstructionError(reqwest::Error), + /// The scheme is invalid. + #[error("The URL scheme is invalid: {0}")] + BadScheme(String), +} + +impl AuthenticatedTransport { + /// Create a new builder with the given URL. + pub async fn connect(url: Url, jwt: JwtSecret) -> Result { + let (inner, claims) = InnerTransport::connect(url.clone(), jwt).await?; + Ok(Self { inner_and_claims: Arc::new(RwLock::new((inner, claims))), jwt, url }) + } + + /// Sends a request using the underlying transport. + /// + /// For sending the actual request, this action is delegated down to the underlying transport + /// through Tower's [`tower::Service::call`]. See tower's [`tower::Service`] trait for more + /// information. 
+ fn request(&self, req: RequestPacket) -> TransportFut<'static> { + let this = self.clone(); + + Box::pin(async move { + let mut inner_and_claims = this.inner_and_claims.write().await; + + // shift the iat forward by one second so there is some buffer time + let mut shifted_claims = inner_and_claims.1; + shifted_claims.iat -= 1; + + // if the claims are out of date, reset the inner transport + if !shifted_claims.is_within_time_window() { + let (new_inner, new_claims) = + InnerTransport::connect(this.url.clone(), this.jwt).await.map_err(|e| { + TransportError::Transport(TransportErrorKind::Custom(Box::new(e))) + })?; + *inner_and_claims = (new_inner, new_claims); + } + + match inner_and_claims.0 { + InnerTransport::Http(ref http) => { + let mut http = http; + http.call(req) + } + InnerTransport::Ws(ref ws) => { + let mut ws = ws; + ws.call(req) + } + InnerTransport::Ipc(ref ipc) => { + let mut ipc = ipc; + // we don't need to recreate the client for IPC + ipc.call(req) + } + } + .await + }) + } +} + +fn build_auth(secret: JwtSecret) -> eyre::Result<(Authorization, Claims)> { + // Generate claims (iat with current timestamp), this happens by default using the Default trait + // for Claims. + let claims = Claims::default(); + let token = secret.encode(&claims)?; + let auth = Authorization::Bearer(token); + + Ok((auth, claims)) +} + +/// This specifies how to connect to an authenticated transport. +#[derive(Clone, Debug)] +pub struct AuthenticatedTransportConnect { + /// The URL to connect to. + url: Url, + /// The JWT secret used to authenticate the transport. + jwt: JwtSecret, +} + +impl AuthenticatedTransportConnect { + /// Create a new builder with the given URL. 
+ pub const fn new(url: Url, jwt: JwtSecret) -> Self { + Self { url, jwt } + } +} + +impl TransportConnect for AuthenticatedTransportConnect { + type Transport = AuthenticatedTransport; + + fn is_local(&self) -> bool { + guess_local_url(&self.url) + } + + fn get_transport<'a: 'b, 'b>(&'a self) -> Pbf<'b, Self::Transport, TransportError> { + AuthenticatedTransport::connect(self.url.clone(), self.jwt) + .map(|res| match res { + Ok(transport) => Ok(transport), + Err(err) => { + Err(TransportError::Transport(TransportErrorKind::Custom(Box::new(err)))) + } + }) + .boxed() + } +} + +impl tower::Service for AuthenticatedTransport { + type Response = ResponsePacket; + type Error = TransportError; + type Future = TransportFut<'static>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: RequestPacket) -> Self::Future { + self.request(req) + } +} + +impl tower::Service for &AuthenticatedTransport { + type Response = ResponsePacket; + type Error = TransportError; + type Future = TransportFut<'static>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: RequestPacket) -> Self::Future { + self.request(req) + } +} diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs new file mode 100644 index 000000000000..7f45ee6adfe8 --- /dev/null +++ b/bin/reth-bench/src/bench/context.rs @@ -0,0 +1,111 @@ +//! This contains the [`BenchContext`], which is information that all replay-based benchmarks need. +//! The initialization code is also the same, so this can be shared across benchmark commands. 
+ +use crate::{authenticated_transport::AuthenticatedTransportConnect, bench_mode::BenchMode}; +use alloy_eips::BlockNumberOrTag; +use alloy_provider::{ + network::{AnyNetwork, Ethereum}, + Provider, ProviderBuilder, RootProvider, +}; +use alloy_rpc_client::ClientBuilder; +use alloy_rpc_types_engine::JwtSecret; +use alloy_transport::BoxTransport; +use alloy_transport_http::Http; +use reqwest::{Client, Url}; +use reth_node_core::args::BenchmarkArgs; +use tracing::info; + +/// This is intended to be used by benchmarks that replay blocks from an RPC. +/// +/// It contains an authenticated provider for engine API queries, a block provider for block +/// queries, a [`BenchMode`] to determine whether the benchmark should run for a closed or open +/// range of blocks, and the next block to fetch. +pub(crate) struct BenchContext { + /// The auth provider used for engine API queries. + pub(crate) auth_provider: RootProvider, + /// The block provider used for block queries. + pub(crate) block_provider: RootProvider, Ethereum>, + /// The benchmark mode, which defines whether the benchmark should run for a closed or open + /// range of blocks. + pub(crate) benchmark_mode: BenchMode, + /// The next block to fetch. + pub(crate) next_block: u64, +} + +impl BenchContext { + /// This is the initialization code for most benchmarks, taking in a [`BenchmarkArgs`] and + /// returning the providers needed to run a benchmark. 
+ pub(crate) async fn new(bench_args: &BenchmarkArgs, rpc_url: String) -> eyre::Result { + info!("Running benchmark using data from RPC URL: {}", rpc_url); + + // Ensure that output directory is a directory + if let Some(output) = &bench_args.output { + if output.is_file() { + return Err(eyre::eyre!("Output path must be a directory")); + } + } + + // set up alloy client for blocks + let block_provider = ProviderBuilder::new().on_http(rpc_url.parse()?); + + // If neither `--from` nor `--to` are provided, we will run the benchmark continuously, + // starting at the latest block. + let mut benchmark_mode = BenchMode::new(bench_args.from, bench_args.to)?; + + // construct the authenticated provider + let auth_jwt = bench_args.auth_jwtsecret.clone().ok_or_else(|| { + eyre::eyre!("--auth-jwtsecret must be provided for authenticated RPC") + })?; + + // fetch jwt from file + // + // the jwt is hex encoded so we will decode it after + let jwt = std::fs::read_to_string(auth_jwt)?; + let jwt = JwtSecret::from_hex(jwt)?; + + // get engine url + let auth_url = Url::parse(&bench_args.engine_rpc_url)?; + + // construct the authed transport + info!("Connecting to Engine RPC at {} for replay", auth_url); + let auth_transport = AuthenticatedTransportConnect::new(auth_url, jwt); + let client = ClientBuilder::default().connect_boxed(auth_transport).await?; + let auth_provider = RootProvider::<_, AnyNetwork>::new(client); + + let first_block = match benchmark_mode { + BenchMode::Continuous => { + // fetch Latest block + block_provider.get_block_by_number(BlockNumberOrTag::Latest, true).await?.unwrap() + } + BenchMode::Range(ref mut range) => { + match range.next() { + Some(block_number) => { + // fetch first block in range + block_provider + .get_block_by_number(block_number.into(), true) + .await? 
+ .unwrap() + } + None => { + return Err(eyre::eyre!( + "Benchmark mode range is empty, please provide a larger range" + )); + } + } + } + }; + + let next_block = match first_block.header.number { + Some(number) => { + // fetch next block + number + 1 + } + None => { + // this should never happen + return Err(eyre::eyre!("First block number is None")); + } + }; + + Ok(Self { auth_provider, block_provider, benchmark_mode, next_block }) + } +} diff --git a/bin/reth-bench/src/bench/mod.rs b/bin/reth-bench/src/bench/mod.rs new file mode 100644 index 000000000000..076dbb4af7d4 --- /dev/null +++ b/bin/reth-bench/src/bench/mod.rs @@ -0,0 +1,53 @@ +//! `reth benchmark` command. Collection of various benchmarking routines. + +use clap::{Parser, Subcommand}; +use reth_cli_runner::CliContext; +use reth_node_core::args::LogArgs; +use reth_tracing::FileWorkerGuard; + +mod context; +mod new_payload_fcu; +mod new_payload_only; +mod output; + +/// `reth bench` command +#[derive(Debug, Parser)] +pub struct BenchmarkCommand { + #[command(subcommand)] + command: Subcommands, + + #[command(flatten)] + logs: LogArgs, +} + +/// `reth benchmark` subcommands +#[derive(Subcommand, Debug)] +pub enum Subcommands { + /// Benchmark which calls `newPayload`, then `forkchoiceUpdated`. + NewPayloadFcu(new_payload_fcu::Command), + + /// Benchmark which only calls subsequent `newPayload` calls. + NewPayloadOnly(new_payload_only::Command), +} + +impl BenchmarkCommand { + /// Execute `benchmark` command + pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + // Initialize tracing + let _guard = self.init_tracing()?; + + match self.command { + Subcommands::NewPayloadFcu(command) => command.execute(ctx).await, + Subcommands::NewPayloadOnly(command) => command.execute(ctx).await, + } + } + + /// Initializes tracing with the configured options. + /// + /// If file logging is enabled, this function returns a guard that must be kept alive to ensure + /// that all logs are flushed to disk. 
+ pub fn init_tracing(&self) -> eyre::Result> { + let guard = self.logs.init_tracing()?; + Ok(guard) + } +} diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs new file mode 100644 index 000000000000..c7ea5683175f --- /dev/null +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -0,0 +1,179 @@ +//! Runs the `reth bench` command, calling first newPayload for each block, then calling +//! forkchoiceUpdated. + +use crate::{ + bench::{ + context::BenchContext, + output::{ + CombinedResult, NewPayloadResult, TotalGasOutput, TotalGasRow, COMBINED_OUTPUT_SUFFIX, + GAS_OUTPUT_SUFFIX, + }, + }, + valid_payload::{call_forkchoice_updated, call_new_payload}, +}; +use alloy_provider::Provider; +use alloy_rpc_types_engine::ForkchoiceState; +use clap::Parser; +use csv::Writer; +use reth_cli_runner::CliContext; +use reth_node_core::args::BenchmarkArgs; +use reth_primitives::{Block, B256}; +use reth_rpc_types_compat::engine::payload::block_to_payload; +use std::time::Instant; +use tracing::{debug, info}; + +/// `reth benchmark new-payload-fcu` command +#[derive(Debug, Parser)] +pub struct Command { + /// The RPC url to use for getting data. 
+ #[arg(long, value_name = "RPC_URL", verbatim_doc_comment)] + rpc_url: String, + + #[command(flatten)] + benchmark: BenchmarkArgs, +} + +impl Command { + /// Execute `benchmark new-payload-fcu` command + pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { + let cloned_args = self.benchmark.clone(); + let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = + BenchContext::new(&cloned_args, self.rpc_url).await?; + + let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); + tokio::task::spawn(async move { + while benchmark_mode.contains(next_block) { + let block_res = block_provider.get_block_by_number(next_block.into(), true).await; + let block = block_res.unwrap().unwrap(); + let block = match block.header.hash { + Some(block_hash) => { + // we can reuse the hash in the response + Block::try_from(block).unwrap().seal(block_hash) + } + None => { + // we don't have the hash, so let's just hash it + Block::try_from(block).unwrap().seal_slow() + } + }; + + let head_block_hash = block.hash(); + let safe_block_hash = + block_provider.get_block_by_number((block.number - 32).into(), false); + + let finalized_block_hash = + block_provider.get_block_by_number((block.number - 64).into(), false); + + let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,); + + let safe_block_hash = safe + .unwrap() + .expect("finalized block exists") + .header + .hash + .expect("finalized block has hash"); + let finalized_block_hash = finalized + .unwrap() + .expect("finalized block exists") + .header + .hash + .expect("finalized block has hash"); + + next_block += 1; + sender + .send((block, head_block_hash, safe_block_hash, finalized_block_hash)) + .await + .unwrap(); + } + }); + + // put results in a summary vec so they can be printed at the end + let mut results = Vec::new(); + let total_benchmark_duration = Instant::now(); + + while let Some((block, head, safe, finalized)) = receiver.recv().await { + // just put gas 
used here + let gas_used = block.header.gas_used; + let block_number = block.header.number; + + let versioned_hashes: Vec = + block.blob_versioned_hashes().into_iter().copied().collect(); + let (payload, parent_beacon_block_root) = block_to_payload(block); + + debug!(?block_number, "Sending payload",); + + // construct fcu to call + let forkchoice_state = ForkchoiceState { + head_block_hash: head, + safe_block_hash: safe, + finalized_block_hash: finalized, + }; + + let start = Instant::now(); + let message_version = call_new_payload( + &auth_provider, + payload, + parent_beacon_block_root, + versioned_hashes, + ) + .await?; + + let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; + + call_forkchoice_updated(&auth_provider, message_version, forkchoice_state, None) + .await?; + + // calculate the total duration and the fcu latency, record + let total_latency = start.elapsed(); + let fcu_latency = total_latency - new_payload_result.latency; + let combined_result = CombinedResult { new_payload_result, fcu_latency, total_latency }; + + // current duration since the start of the benchmark + let current_duration = total_benchmark_duration.elapsed(); + + // convert gas used to gigagas, then compute gigagas per second + info!(%combined_result); + + // record the current result + let gas_row = TotalGasRow { block_number, gas_used, time: current_duration }; + results.push((gas_row, combined_result)); + } + + let (gas_output_results, combined_results): (_, Vec) = + results.into_iter().unzip(); + + // write the csv output to files + if let Some(path) = self.benchmark.output { + // first write the combined results to a file + let output_path = path.join(COMBINED_OUTPUT_SUFFIX); + info!("Writing engine api call latency output to file: {:?}", output_path); + let mut writer = Writer::from_path(output_path)?; + for result in combined_results { + writer.serialize(result)?; + } + writer.flush()?; + + // now write the gas output to a file + let output_path 
= path.join(GAS_OUTPUT_SUFFIX); + info!("Writing total gas output to file: {:?}", output_path); + let mut writer = Writer::from_path(output_path)?; + for row in &gas_output_results { + writer.serialize(row)?; + } + writer.flush()?; + + info!("Finished writing benchmark output files to {:?}.", path); + } + + // accumulate the results and calculate the overall Ggas/s + let gas_output = TotalGasOutput::new(gas_output_results); + info!( + total_duration=?gas_output.total_duration, + total_gas_used=?gas_output.total_gas_used, + blocks_processed=?gas_output.blocks_processed, + "Total Ggas/s: {:.4}", + gas_output.total_gigagas_per_second() + ); + + Ok(()) + } +} diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs new file mode 100644 index 000000000000..3fa85e5749ac --- /dev/null +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -0,0 +1,136 @@ +//! Runs the `reth bench` command, sending only newPayload, without a forkchoiceUpdated call. + +use crate::{ + bench::{ + context::BenchContext, + output::{ + NewPayloadResult, TotalGasOutput, TotalGasRow, GAS_OUTPUT_SUFFIX, + NEW_PAYLOAD_OUTPUT_SUFFIX, + }, + }, + valid_payload::call_new_payload, +}; +use alloy_provider::Provider; +use clap::Parser; +use csv::Writer; +use reth_cli_runner::CliContext; +use reth_node_core::args::BenchmarkArgs; +use reth_primitives::{Block, B256}; +use reth_rpc_types_compat::engine::payload::block_to_payload; +use std::time::Instant; +use tracing::{debug, info}; + +/// `reth benchmark new-payload-only` command +#[derive(Debug, Parser)] +pub struct Command { + /// The RPC url to use for getting data. 
+ #[arg(long, value_name = "RPC_URL", verbatim_doc_comment)] + rpc_url: String, + + #[command(flatten)] + benchmark: BenchmarkArgs, +} + +impl Command { + /// Execute `benchmark new-payload-only` command + pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { + let cloned_args = self.benchmark.clone(); + // TODO: this could be just a function I guess, but destructuring makes the code slightly + // more readable than a 4 element tuple. + let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = + BenchContext::new(&cloned_args, self.rpc_url).await?; + + let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); + tokio::task::spawn(async move { + while benchmark_mode.contains(next_block) { + let block_res = block_provider.get_block_by_number(next_block.into(), true).await; + let block = block_res.unwrap().unwrap(); + let block = match block.header.hash { + Some(block_hash) => { + // we can reuse the hash in the response + Block::try_from(block).unwrap().seal(block_hash) + } + None => { + // we don't have the hash, so let's just hash it + Block::try_from(block).unwrap().seal_slow() + } + }; + + next_block += 1; + sender.send(block).await.unwrap(); + } + }); + + // put results in a summary vec so they can be printed at the end + let mut results = Vec::new(); + let total_benchmark_duration = Instant::now(); + + while let Some(block) = receiver.recv().await { + // just put gas used here + let gas_used = block.header.gas_used; + + let versioned_hashes: Vec = + block.blob_versioned_hashes().into_iter().copied().collect(); + let (payload, parent_beacon_block_root) = block_to_payload(block); + + let block_number = payload.block_number(); + + debug!( + number=?payload.block_number(), + "Sending payload to engine", + ); + + let start = Instant::now(); + call_new_payload(&auth_provider, payload, parent_beacon_block_root, versioned_hashes) + .await?; + + let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; 
+ info!(%new_payload_result); + + // current duration since the start of the benchmark + let current_duration = total_benchmark_duration.elapsed(); + + // record the current result + let row = TotalGasRow { block_number, gas_used, time: current_duration }; + results.push((row, new_payload_result)); + } + + let (gas_output_results, new_payload_results): (_, Vec) = + results.into_iter().unzip(); + + // write the csv output to files + if let Some(path) = self.benchmark.output { + // first write the new payload results to a file + let output_path = path.join(NEW_PAYLOAD_OUTPUT_SUFFIX); + info!("Writing newPayload call latency output to file: {:?}", output_path); + let mut writer = Writer::from_path(output_path)?; + for result in new_payload_results { + writer.serialize(result)?; + } + writer.flush()?; + + // now write the gas output to a file + let output_path = path.join(GAS_OUTPUT_SUFFIX); + info!("Writing total gas output to file: {:?}", output_path); + let mut writer = Writer::from_path(output_path)?; + for row in &gas_output_results { + writer.serialize(row)?; + } + writer.flush()?; + + info!("Finished writing benchmark output files to {:?}.", path); + } + + // accumulate the results and calculate the overall Ggas/s + let gas_output = TotalGasOutput::new(gas_output_results); + info!( + total_duration=?gas_output.total_duration, + total_gas_used=?gas_output.total_gas_used, + blocks_processed=?gas_output.blocks_processed, + "Total Ggas/s: {:.4}", + gas_output.total_gigagas_per_second() + ); + + Ok(()) + } +} diff --git a/bin/reth-bench/src/bench/output.rs b/bin/reth-bench/src/bench/output.rs new file mode 100644 index 000000000000..83103418d929 --- /dev/null +++ b/bin/reth-bench/src/bench/output.rs @@ -0,0 +1,206 @@ +//! Contains various benchmark output formats, either for logging or for +//! serialization to / from files. 
+ +use reth_primitives::constants::gas_units::GIGAGAS; +use serde::{ser::SerializeStruct, Serialize}; +use std::time::Duration; + +/// This is the suffix for gas output csv files. +pub(crate) const GAS_OUTPUT_SUFFIX: &str = "total_gas.csv"; + +/// This is the suffix for combined output csv files. +pub(crate) const COMBINED_OUTPUT_SUFFIX: &str = "combined_latency.csv"; + +/// This is the suffix for new payload output csv files. +pub(crate) const NEW_PAYLOAD_OUTPUT_SUFFIX: &str = "new_payload_latency.csv"; + +/// This represents the results of a single `newPayload` call in the benchmark, containing the gas +/// used and the `newPayload` latency. +#[derive(Debug)] +pub(crate) struct NewPayloadResult { + /// The gas used in the `newPayload` call. + pub(crate) gas_used: u64, + /// The latency of the `newPayload` call. + pub(crate) latency: Duration, +} + +impl NewPayloadResult { + /// Returns the gas per second processed in the `newPayload` call. + pub(crate) fn gas_per_second(&self) -> f64 { + self.gas_used as f64 / self.latency.as_secs_f64() + } +} + +impl std::fmt::Display for NewPayloadResult { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "New payload processed at {:.4} Ggas/s, used {} total gas. Latency: {:?}", + self.gas_per_second() / GIGAGAS as f64, + self.gas_used, + self.latency + ) + } +} + +/// This is another [`Serialize`] implementation for the [`NewPayloadResult`] struct, serializing +/// the duration as microseconds because the csv writer would fail otherwise. 
+impl Serialize for NewPayloadResult { + fn serialize(&self, serializer: S) -> Result + where + S: serde::ser::Serializer, + { + // convert the time to microseconds + let time = self.latency.as_micros(); + let mut state = serializer.serialize_struct("NewPayloadResult", 3)?; + state.serialize_field("gas_used", &self.gas_used)?; + state.serialize_field("latency", &time)?; + state.end() + } +} + +/// This represents the combined results of a `newPayload` call and a `forkchoiceUpdated` call in +/// the benchmark, containing the gas used, the `newPayload` latency, and the `forkchoiceUpdated` +/// latency. +#[derive(Debug)] +pub(crate) struct CombinedResult { + /// The `newPayload` result. + pub(crate) new_payload_result: NewPayloadResult, + /// The latency of the `forkchoiceUpdated` call. + pub(crate) fcu_latency: Duration, + /// The latency of both calls combined. + pub(crate) total_latency: Duration, +} + +impl CombinedResult { + /// Returns the gas per second, including the `newPayload` _and_ `forkchoiceUpdated` duration. + pub(crate) fn combined_gas_per_second(&self) -> f64 { + self.new_payload_result.gas_used as f64 / self.total_latency.as_secs_f64() + } +} + +impl std::fmt::Display for CombinedResult { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Payload processed at {:.4} Ggas/s, used {} total gas. Combined gas per second: {:.4} Ggas/s. fcu latency: {:?}, newPayload latency: {:?}", + self.new_payload_result.gas_per_second() / GIGAGAS as f64, + self.new_payload_result.gas_used, + self.combined_gas_per_second() / GIGAGAS as f64, + self.fcu_latency, + self.new_payload_result.latency + ) + } +} + +/// This is a [`Serialize`] implementation for the [`CombinedResult`] struct, serializing the +/// durations as microseconds because the csv writer would fail otherwise. 
+impl Serialize for CombinedResult { + fn serialize(&self, serializer: S) -> Result + where + S: serde::ser::Serializer, + { + // convert the time to microseconds + let fcu_latency = self.fcu_latency.as_micros(); + let new_payload_latency = self.new_payload_result.latency.as_micros(); + let total_latency = self.total_latency.as_micros(); + let mut state = serializer.serialize_struct("CombinedResult", 4)?; + + // flatten the new payload result because this is meant for CSV writing + state.serialize_field("gas_used", &self.new_payload_result.gas_used)?; + state.serialize_field("new_payload_latency", &new_payload_latency)?; + state.serialize_field("fcu_latency", &fcu_latency)?; + state.serialize_field("total_latency", &total_latency)?; + state.end() + } +} + +/// This represents a row of total gas data in the benchmark. +#[derive(Debug)] +pub(crate) struct TotalGasRow { + /// The block number of the block being processed. + #[allow(dead_code)] + pub(crate) block_number: u64, + /// The total gas used in the block. + pub(crate) gas_used: u64, + /// Time since the start of the benchmark. + pub(crate) time: Duration, +} + +/// This represents the aggregated output, meant to show gas per second metrics, of a benchmark run. +#[derive(Debug)] +pub(crate) struct TotalGasOutput { + /// The total gas used in the benchmark. + pub(crate) total_gas_used: u64, + /// The total duration of the benchmark. + pub(crate) total_duration: Duration, + /// The total gas used per second. + pub(crate) total_gas_per_second: f64, + /// The number of blocks processed. + pub(crate) blocks_processed: u64, +} + +impl TotalGasOutput { + /// Create a new [`TotalGasOutput`] from a list of [`TotalGasRow`]. 
+ pub(crate) fn new(rows: Vec) -> Self { + // the duration is obtained from the last row + let total_duration = + rows.last().map(|row| row.time).expect("the row has at least one element"); + let blocks_processed = rows.len() as u64; + let total_gas_used: u64 = rows.into_iter().map(|row| row.gas_used).sum(); + let total_gas_per_second = total_gas_used as f64 / total_duration.as_secs_f64(); + + Self { total_gas_used, total_duration, total_gas_per_second, blocks_processed } + } + + /// Return the total gigagas per second. + pub(crate) fn total_gigagas_per_second(&self) -> f64 { + self.total_gas_per_second / GIGAGAS as f64 + } +} + +/// This serializes the `time` field of the [`TotalGasRow`] to microseconds. +/// +/// This is essentially just for the csv writer, which would have headers +impl Serialize for TotalGasRow { + fn serialize(&self, serializer: S) -> Result + where + S: serde::ser::Serializer, + { + // convert the time to microseconds + let time = self.time.as_micros(); + let mut state = serializer.serialize_struct("TotalGasRow", 3)?; + state.serialize_field("block_number", &self.block_number)?; + state.serialize_field("gas_used", &self.gas_used)?; + state.serialize_field("time", &time)?; + state.end() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use csv::Writer; + use std::io::BufRead; + + #[test] + fn test_write_total_gas_row_csv() { + let row = TotalGasRow { block_number: 1, gas_used: 1_000, time: Duration::from_secs(1) }; + + let mut writer = Writer::from_writer(vec![]); + writer.serialize(row).unwrap(); + let result = writer.into_inner().unwrap(); + + // parse into Lines + let mut result = result.as_slice().lines(); + + // assert header + let expected_first_line = "block_number,gas_used,time"; + let first_line = result.next().unwrap().unwrap(); + assert_eq!(first_line, expected_first_line); + + let expected_second_line = "1,1000,1000000"; + let second_line = result.next().unwrap().unwrap(); + assert_eq!(second_line, expected_second_line); + } +} 
diff --git a/bin/reth-bench/src/bench_mode.rs b/bin/reth-bench/src/bench_mode.rs new file mode 100644 index 000000000000..ae66ba53822f --- /dev/null +++ b/bin/reth-bench/src/bench_mode.rs @@ -0,0 +1,37 @@ +//! The benchmark mode defines whether the benchmark should run for a closed or open range of +//! blocks. +use std::ops::RangeInclusive; + +/// Whether or not the benchmark should run as a continuous stream of payloads. +#[derive(Debug, PartialEq, Eq)] +pub enum BenchMode { + // TODO: just include the start block in `Continuous` + /// Run the benchmark as a continuous stream of payloads, until the benchmark is interrupted. + Continuous, + /// Run the benchmark for a specific range of blocks. + Range(RangeInclusive), +} + +impl BenchMode { + /// Check if the block number is in the range + pub fn contains(&self, block_number: u64) -> bool { + match self { + Self::Continuous => true, + Self::Range(range) => range.contains(&block_number), + } + } + + /// Create a [`BenchMode`] from optional `from` and `to` fields. + pub fn new(from: Option, to: Option) -> Result { + // If neither `--from` nor `--to` are provided, we will run the benchmark continuously, + // starting at the latest block. + match (from, to) { + (Some(from), Some(to)) => Ok(Self::Range(from..=to)), + (None, None) => Ok(Self::Continuous), + _ => { + // both or neither are allowed, everything else is ambiguous + Err(eyre::eyre!("`from` and `to` must be provided together, or not at all.")) + } + } + } +} diff --git a/bin/reth-bench/src/main.rs b/bin/reth-bench/src/main.rs new file mode 100644 index 000000000000..8cb7dbd07b91 --- /dev/null +++ b/bin/reth-bench/src/main.rs @@ -0,0 +1,34 @@ +//! # reth-benchmark +//! +//! This is a tool that converts existing blocks into a stream of blocks for benchmarking purposes. +//! These blocks are then fed into reth as a stream of execution payloads. + +// We use jemalloc for performance reasons. 
+#[cfg(all(feature = "jemalloc", unix))] +#[global_allocator] +static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; + +pub mod authenticated_transport; +pub mod bench; +pub mod bench_mode; +pub mod valid_payload; + +use bench::BenchmarkCommand; +use clap::Parser; +use reth_cli_runner::CliRunner; + +fn main() { + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. + if std::env::var_os("RUST_BACKTRACE").is_none() { + std::env::set_var("RUST_BACKTRACE", "1"); + } + + // Run until either exit or sigint or sigterm + let runner = CliRunner::default(); + runner + .run_command_until_exit(|ctx| { + let command = BenchmarkCommand::parse(); + command.execute(ctx) + }) + .unwrap(); +} diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs new file mode 100644 index 000000000000..4c96c43b18cf --- /dev/null +++ b/bin/reth-bench/src/valid_payload.rs @@ -0,0 +1,275 @@ +//! This is an extension trait for any provider that implements the engine API, to wait for a VALID +//! response. This is useful for benchmarking, as it allows us to wait for a payload to be valid +//! before sending additional calls. + +use alloy_provider::{ext::EngineApi, Network}; +use alloy_rpc_types_engine::{ + ExecutionPayloadInputV2, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadStatus, +}; +use alloy_transport::{Transport, TransportResult}; +use reth_node_api::EngineApiMessageVersion; +use reth_primitives::B256; +use reth_rpc_types::{ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV3}; +use tracing::error; + +/// An extension trait for providers that implement the engine API, to wait for a VALID response. +#[async_trait::async_trait] +pub trait EngineApiValidWaitExt: Send + Sync { + /// Calls `engine_newPayloadV1` with the given [ExecutionPayloadV1], and waits until the + /// response is VALID. 
+ async fn new_payload_v1_wait( + &self, + payload: ExecutionPayloadV1, + ) -> TransportResult; + + /// Calls `engine_newPayloadV2` with the given [ExecutionPayloadInputV2], and waits until the + /// response is VALID. + async fn new_payload_v2_wait( + &self, + payload: ExecutionPayloadInputV2, + ) -> TransportResult; + + /// Calls `engine_newPayloadV3` with the given [ExecutionPayloadV3], parent beacon block root, + /// and versioned hashes, and waits until the response is VALID. + async fn new_payload_v3_wait( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> TransportResult; + + /// Calls `engine_forkChoiceUpdatedV1` with the given [ForkchoiceState] and optional + /// [PayloadAttributes], and waits until the response is VALID. + async fn fork_choice_updated_v1_wait( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> TransportResult; + + /// Calls `engine_forkChoiceUpdatedV2` with the given [ForkchoiceState] and optional + /// [PayloadAttributes], and waits until the response is VALID. + async fn fork_choice_updated_v2_wait( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> TransportResult; + + /// Calls `engine_forkChoiceUpdatedV3` with the given [ForkchoiceState] and optional + /// [PayloadAttributes], and waits until the response is VALID. 
+ async fn fork_choice_updated_v3_wait( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> TransportResult; +} + +#[async_trait::async_trait] +impl EngineApiValidWaitExt for P +where + N: Network, + T: Transport + Clone, + P: EngineApi, +{ + async fn new_payload_v1_wait( + &self, + payload: ExecutionPayloadV1, + ) -> TransportResult { + let mut status = self.new_payload_v1(payload.clone()).await?; + while !status.is_valid() { + if status.is_invalid() { + error!(?status, ?payload, "Invalid newPayloadV1",); + panic!("Invalid newPayloadV1: {status:?}"); + } + status = self.new_payload_v1(payload.clone()).await?; + } + Ok(status) + } + + async fn new_payload_v2_wait( + &self, + payload: ExecutionPayloadInputV2, + ) -> TransportResult { + let mut status = self.new_payload_v2(payload.clone()).await?; + while !status.is_valid() { + if status.is_invalid() { + error!(?status, ?payload, "Invalid newPayloadV2",); + panic!("Invalid newPayloadV2: {status:?}"); + } + status = self.new_payload_v2(payload.clone()).await?; + } + Ok(status) + } + + async fn new_payload_v3_wait( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> TransportResult { + let mut status = self + .new_payload_v3(payload.clone(), versioned_hashes.clone(), parent_beacon_block_root) + .await?; + while !status.is_valid() { + if status.is_invalid() { + error!( + ?status, + ?payload, + ?versioned_hashes, + ?parent_beacon_block_root, + "Invalid newPayloadV3", + ); + panic!("Invalid newPayloadV3: {status:?}"); + } + status = self + .new_payload_v3(payload.clone(), versioned_hashes.clone(), parent_beacon_block_root) + .await?; + } + Ok(status) + } + + async fn fork_choice_updated_v1_wait( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> TransportResult { + let mut status = + self.fork_choice_updated_v1(fork_choice_state, payload_attributes.clone()).await?; + + while !status.is_valid() { + if 
status.is_invalid() { + error!( + ?status, + ?fork_choice_state, + ?payload_attributes, + "Invalid forkchoiceUpdatedV1 message", + ); + panic!("Invalid forkchoiceUpdatedV1: {status:?}"); + } + status = + self.fork_choice_updated_v1(fork_choice_state, payload_attributes.clone()).await?; + } + + Ok(status) + } + + async fn fork_choice_updated_v2_wait( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> TransportResult { + let mut status = + self.fork_choice_updated_v2(fork_choice_state, payload_attributes.clone()).await?; + + while !status.is_valid() { + if status.is_invalid() { + error!( + ?status, + ?fork_choice_state, + ?payload_attributes, + "Invalid forkchoiceUpdatedV2 message", + ); + panic!("Invalid forkchoiceUpdatedV2: {status:?}"); + } + status = + self.fork_choice_updated_v2(fork_choice_state, payload_attributes.clone()).await?; + } + + Ok(status) + } + + async fn fork_choice_updated_v3_wait( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> TransportResult { + let mut status = + self.fork_choice_updated_v3(fork_choice_state, payload_attributes.clone()).await?; + + while !status.is_valid() { + if status.is_invalid() { + error!( + ?status, + ?fork_choice_state, + ?payload_attributes, + "Invalid forkchoiceUpdatedV3 message", + ); + panic!("Invalid forkchoiceUpdatedV3: {status:?}"); + } + status = + self.fork_choice_updated_v3(fork_choice_state, payload_attributes.clone()).await?; + } + + Ok(status) + } +} + +/// Calls the correct `engine_newPayload` method depending on the given [`ExecutionPayload`] and its +/// versioned variant. Returns the [`EngineApiMessageVersion`] depending on the payload's version. +/// +/// # Panics +/// If the given payload is a V3 payload, but a parent beacon block root is provided as `None`. 
+pub(crate) async fn call_new_payload>( + provider: P, + payload: ExecutionPayload, + parent_beacon_block_root: Option, + versioned_hashes: Vec, +) -> TransportResult { + match payload { + ExecutionPayload::V4(_payload) => { + todo!("V4 payloads not supported yet"); + // auth_provider + // .new_payload_v4_wait(payload, versioned_hashes, parent_beacon_block_root, ...) + // .await?; + // + // Ok(EngineApiMessageVersion::V4) + } + ExecutionPayload::V3(payload) => { + // We expect the caller + let parent_beacon_block_root = parent_beacon_block_root + .expect("parent_beacon_block_root is required for V3 payloads"); + provider + .new_payload_v3_wait(payload, versioned_hashes, parent_beacon_block_root) + .await?; + + Ok(EngineApiMessageVersion::V3) + } + ExecutionPayload::V2(payload) => { + let input = ExecutionPayloadInputV2 { + execution_payload: payload.payload_inner, + withdrawals: Some(payload.withdrawals), + }; + + provider.new_payload_v2_wait(input).await?; + + Ok(EngineApiMessageVersion::V2) + } + ExecutionPayload::V1(payload) => { + provider.new_payload_v1_wait(payload).await?; + + Ok(EngineApiMessageVersion::V1) + } + } +} + +/// Calls the correct `engine_forkchoiceUpdated` method depending on the given +/// `EngineApiMessageVersion`, using the provided forkchoice state and payload attributes for the +/// actual engine api message call. 
+pub(crate) async fn call_forkchoice_updated>( + provider: P, + message_version: EngineApiMessageVersion, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, +) -> TransportResult { + match message_version { + EngineApiMessageVersion::V4 => todo!("V4 payloads not supported yet"), + EngineApiMessageVersion::V3 => { + provider.fork_choice_updated_v3_wait(forkchoice_state, payload_attributes).await + } + EngineApiMessageVersion::V2 => { + provider.fork_choice_updated_v2_wait(forkchoice_state, payload_attributes).await + } + EngineApiMessageVersion::V1 => { + provider.fork_choice_updated_v1_wait(forkchoice_state, payload_attributes).await + } + } +} diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 38410737ecfb..d67436121e7c 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -14,20 +14,24 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-config.workspace = true -reth-primitives = { workspace = true, features = ["arbitrary", "clap"] } +reth-primitives.workspace = true reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true reth-exex.workspace = true -reth-provider = { workspace = true } +reth-provider.workspace = true reth-evm.workspace = true reth-revm.workspace = true reth-stages.workspace = true +reth-execution-types.workspace = true reth-errors.workspace = true reth-transaction-pool.workspace = true reth-beacon-consensus.workspace = true reth-cli-runner.workspace = true +reth-cli-commands.workspace = true +reth-cli-util.workspace = true reth-consensus-common.workspace = true reth-blockchain-tree.workspace = true reth-rpc-builder.workspace = true @@ -35,35 +39,37 @@ reth-rpc.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-rpc-api = { workspace = true, features = ["client"] } +reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true reth-network = { workspace = true, features = 
["serde"] } reth-network-p2p.workspace = true -reth-net-common.workspace = true reth-network-api.workspace = true reth-downloaders.workspace = true reth-tracing.workspace = true reth-tasks.workspace = true -reth-ethereum-payload-builder.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true -reth-discv4.workspace = true -reth-discv5.workspace = true -reth-static-file = { workspace = true } +reth-static-file.workspace = true +reth-static-file-types = { workspace = true, features = ["clap"] } reth-trie = { workspace = true, features = ["metrics"] } -reth-nippy-jar.workspace = true reth-node-api.workspace = true -reth-node-ethereum.workspace = true reth-node-optimism = { workspace = true, optional = true, features = [ "optimism", ] } reth-node-core.workspace = true +reth-ethereum-payload-builder.workspace = true reth-db-common.workspace = true +reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true reth-consensus.workspace = true reth-optimism-primitives.workspace = true -reth-prune-types.workspace = true +reth-engine-util.workspace = true +reth-prune.workspace = true +reth-stages-api.workspace = true +reth-optimism-cli = { workspace = true, optional = true } # crypto alloy-rlp.workspace = true @@ -81,18 +87,6 @@ toml = { workspace = true, features = ["display"] } # metrics metrics-process.workspace = true -# test vectors generation -proptest.workspace = true -rand.workspace = true - -# tui -comfy-table = "7.0" -crossterm = "0.27.0" -ratatui = { version = "0.26", default-features = false, features = [ - "crossterm", -] } -human_bytes = "0.4.1" - # async tokio = { workspace = true, features = [ "sync", @@ -110,9 +104,6 @@ tempfile.workspace = true backon.workspace = true similar-asserts.workspace = true itertools.workspace = true -rayon.workspace = true -boyer-moore-magiclen = "0.2.16" -ahash = 
"0.8" # p2p discv5.workspace = true @@ -122,12 +113,14 @@ tikv-jemallocator = { version = "0.5.0", optional = true } libc = "0.2" [dev-dependencies] -jsonrpsee.workspace = true -assert_matches = "1.5.0" +reth-discv4.workspace = true + [features] default = ["jemalloc"] +dev = ["reth-cli-commands/dev"] + asm-keccak = ["reth-primitives/asm-keccak"] jemalloc = ["dep:tikv-jemallocator", "reth-node-core/jemalloc"] @@ -140,6 +133,8 @@ min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] optimism = [ + "dep:reth-optimism-cli", + "reth-optimism-cli?/optimism", "reth-primitives/optimism", "reth-rpc/optimism", "reth-provider/optimism", @@ -147,6 +142,7 @@ optimism = [ "reth-blockchain-tree/optimism", "dep:reth-node-optimism", "reth-node-core/optimism", + "reth-rpc-eth-types/optimism", ] # no-op feature flag for switching between the `optimism` and default functionality in CI matrices diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 033abdd377d5..e369307267b8 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -2,23 +2,27 @@ use crate::{ args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, + debug_cmd, import, node::{self, NoArgs}, - p2p, recover, stage, test_vectors, }, + macros::block_executor, version::{LONG_VERSION, SHORT_VERSION}, }; use clap::{value_parser, Parser, Subcommand}; +use reth_chainspec::ChainSpec; +use reth_cli_commands::{ + config_cmd, db, dump_genesis, init_cmd, init_state, p2p, prune, recover, stage, +}; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_primitives::ChainSpec; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; +use tracing::info; /// Re-export of the 
`reth_node_core` types specifically in the `cli` module. /// @@ -45,7 +49,7 @@ pub struct Cli { value_name = "CHAIN_OR_PATH", long_help = chain_help(), default_value = SUPPORTED_CHAINS[0], - value_parser = genesis_value_parser, + value_parser = chain_value_parser, global = true, )] chain: Arc, @@ -139,6 +143,7 @@ impl Cli { self.logs.log_file_directory.join(self.chain.chain.to_string()); let _guard = self.init_tracing()?; + info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); let runner = CliRunner::default(); match self.command { @@ -156,12 +161,16 @@ impl Cli { } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Stage(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), + Commands::Stage(command) => runner.run_command_until_exit(|ctx| { + command.execute(ctx, |chain_spec| block_executor!(chain_spec)) + }), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), + #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Debug(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), Commands::Recover(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute()), } } @@ -193,11 +202,11 @@ pub enum Commands { /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. #[cfg(feature = "optimism")] #[command(name = "import-op")] - ImportOp(crate::commands::import_op::ImportOpCommand), + ImportOp(reth_optimism_cli::ImportOpCommand), /// This imports RLP encoded receipts from a file. 
#[cfg(feature = "optimism")] #[command(name = "import-receipts-op")] - ImportReceiptsOp(crate::commands::import_receipts_op::ImportReceiptsOpCommand), + ImportReceiptsOp(reth_optimism_cli::ImportReceiptsOpCommand), /// Dumps genesis block JSON configuration to stdout. DumpGenesis(dump_genesis::DumpGenesisCommand), /// Database debugging utilities @@ -210,8 +219,9 @@ pub enum Commands { #[command(name = "p2p")] P2P(p2p::Command), /// Generate Test Vectors + #[cfg(feature = "dev")] #[command(name = "test-vectors")] - TestVectors(test_vectors::Command), + TestVectors(reth_cli_commands::test_vectors::Command), /// Write config to stdout #[command(name = "config")] Config(config_cmd::Command), @@ -221,6 +231,9 @@ pub enum Commands { /// Scripts for node recovery #[command(name = "recover")] Recover(recover::Command), + /// Prune according to the configuration without any limits + #[command(name = "prune")] + Prune(prune::PruneCommand), } #[cfg(test)] diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 119a6d98f642..afaf799649c9 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,9 +1,5 @@ //! Command for debugging block building. 
- -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, -}; +use crate::macros::block_executor; use alloy_rlp::Decodable; use clap::Parser; use eyre::Context; @@ -14,25 +10,27 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_consensus::Consensus; use reth_db::DatabaseEnv; use reth_errors::RethResult; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; +use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::database::CachedReads; use reth_primitives::{ - constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP}, - revm_primitives::KzgSettings, - Address, BlobTransaction, BlobTransactionSidecar, Bytes, PooledTransactionsElement, - SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, U256, + constants::eip4844::LoadKzgSettingsError, revm_primitives::KzgSettings, Address, + BlobTransaction, BlobTransactionSidecar, Bytes, PooledTransactionsElement, SealedBlock, + SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, U256, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, - ExecutionOutcome, ProviderFactory, StageCheckpointReader, StateProviderFactory, + ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::database::StateProviderDatabase; +use reth_prune::PruneModes; +use reth_revm::{database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; use reth_stages::StageId; use reth_transaction_pool::{ @@ -102,14 +100,14 @@ impl Command { } /// Loads the trusted setup 
params from a given file path or falls back to - /// `MAINNET_KZG_TRUSTED_SETUP`. - fn kzg_settings(&self) -> eyre::Result> { + /// `EnvKzgSettings::Default`. + fn kzg_settings(&self) -> eyre::Result { if let Some(ref trusted_setup_file) = self.trusted_setup_file { let trusted_setup = KzgSettings::load_trusted_setup_file(trusted_setup_file) .map_err(LoadKzgSettingsError::KzgError)?; - Ok(Arc::new(trusted_setup)) + Ok(EnvKzgSettings::Custom(Arc::new(trusted_setup))) } else { - Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + Ok(EnvKzgSettings::Default) } } @@ -125,7 +123,11 @@ impl Command { // configure blockchain tree let tree_externals = TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; + let tree = BlockchainTree::new( + tree_externals, + BlockchainTreeConfig::default(), + PruneModes::none(), + )?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // fetch the best block from the database @@ -301,7 +303,6 @@ impl Command { execution_outcome, hashed_post_state, trie_updates, - None, )?; info!(target: "reth::cli", "Successfully appended built block"); } diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index d27bf191cf07..5751771041c9 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,15 +1,12 @@ //! Command for debugging execution. 
-use crate::{ - args::{get_secret_key, NetworkArgs}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, - utils::get_single_header, -}; +use crate::{args::NetworkArgs, macros::block_executor, utils::get_single_header}; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::Config; use reth_consensus::Consensus; use reth_db::DatabaseEnv; @@ -24,17 +21,16 @@ use reth_network_api::NetworkInfo; use reth_network_p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; use reth_provider::{ - BlockExecutionWriter, ChainSpecProvider, HeaderSyncMode, ProviderFactory, StageCheckpointReader, + BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::{ - sets::DefaultStages, - stages::{ExecutionStage, ExecutionStageThresholds}, - Pipeline, StageId, StageSet, + sets::DefaultStages, stages::ExecutionStage, ExecutionStageThresholds, Pipeline, StageId, + StageSet, }; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; use tracing::*; @@ -86,13 +82,12 @@ impl Command { let (tip_tx, tip_rx) = watch::channel(B256::ZERO); let executor = block_executor!(provider_factory.chain_spec()); - let header_mode = HeaderSyncMode::Tip(tip_rx); let pipeline = Pipeline::builder() .with_tip_sender(tip_tx) .add_stages( DefaultStages::new( provider_factory.clone(), - header_mode, + tip_rx, Arc::clone(&consensus), header_downloader, body_downloader, @@ -131,11 +126,6 @@ impl Command { .network .network_config(config, 
provider_factory.chain_spec(), secret_key, default_peers_path) .with_task_executor(Box::new(task_executor)) - .listener_addr(SocketAddr::new(self.network.addr, self.network.port)) - .discovery_addr(SocketAddr::new( - self.network.discovery.addr, - self.network.discovery.port, - )) .build(provider_factory) .start_network() .await?; @@ -214,7 +204,7 @@ impl Command { ctx.task_executor.spawn_critical( "events task", reth_node_events::node::handle_events( - Some(network.clone()), + Some(Box::new(network)), latest_block_number, events, provider_factory.db_ref().clone(), diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 8b2c0e867ea9..74190bf7783b 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -1,31 +1,33 @@ //! Command for debugging in-memory merkle trie calculation. use crate::{ - args::{get_secret_key, NetworkArgs}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, + args::NetworkArgs, macros::block_executor, utils::{get_single_body, get_single_header}, }; use backon::{ConstantBuilder, Retryable}; use clap::Parser; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::Config; use reth_db::DatabaseEnv; use reth_errors::BlockValidationError; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; +use reth_execution_types::ExecutionOutcome; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_primitives::BlockHashOrNumber; use reth_provider::{ - AccountExtReader, ChainSpecProvider, ExecutionOutcome, HashingWriter, HeaderProvider, - LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, - StateWriter, StaticFileProviderFactory, StorageReader, + AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, 
LatestStateProviderRef, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, + StaticFileProviderFactory, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; use reth_tasks::TaskExecutor; -use reth_trie::{updates::TrieKey, StateRoot}; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use reth_trie::StateRoot; +use std::{path::PathBuf, sync::Arc}; use tracing::*; /// `reth debug in-memory-merkle` command @@ -63,11 +65,6 @@ impl Command { .network .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) .with_task_executor(Box::new(task_executor)) - .listener_addr(SocketAddr::new(self.network.addr, self.network.port)) - .discovery_addr(SocketAddr::new( - self.network.discovery.addr, - self.network.discovery.port, - )) .build(provider_factory) .start_network() .await?; @@ -167,7 +164,6 @@ impl Command { .clone() .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, - None, )?; execution_outcome.write_to_storage(provider_rw.tx_ref(), None, OriginalValuesKnown::No)?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; @@ -192,15 +188,16 @@ impl Command { // Compare updates let mut in_mem_mismatched = Vec::new(); let mut incremental_mismatched = Vec::new(); - let mut in_mem_updates_iter = in_memory_updates.into_iter().peekable(); - let mut incremental_updates_iter = incremental_trie_updates.into_iter().peekable(); + let mut in_mem_updates_iter = in_memory_updates.account_nodes_ref().iter().peekable(); + let mut incremental_updates_iter = + incremental_trie_updates.account_nodes_ref().iter().peekable(); while in_mem_updates_iter.peek().is_some() || incremental_updates_iter.peek().is_some() { match (in_mem_updates_iter.next(), incremental_updates_iter.next()) { (Some(in_mem), Some(incr)) => { similar_asserts::assert_eq!(in_mem.0, incr.0, "Nibbles don't match"); if in_mem.1 != incr.1 && - matches!(in_mem.0, 
TrieKey::AccountNode(ref nibbles) if nibbles.0.len() > self.skip_node_depth.unwrap_or_default()) + in_mem.0.len() > self.skip_node_depth.unwrap_or_default() { in_mem_mismatched.push(in_mem); incremental_mismatched.push(incr); diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index d3b0987ca3c6..8ff0b611b38b 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,15 +1,11 @@ //! Command for debugging merkle trie calculation. - -use crate::{ - args::{get_secret_key, NetworkArgs}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, - utils::get_single_header, -}; +use crate::{args::NetworkArgs, macros::block_executor, utils::get_single_header}; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::Config; use reth_consensus::Consensus; use reth_db::{tables, DatabaseEnv}; @@ -23,14 +19,13 @@ use reth_provider::{ BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, }; -use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; use reth_stages::{ stages::{AccountHashingStage, MerkleStage, StorageHashingStage}, ExecInput, Stage, StageCheckpoint, }; use reth_tasks::TaskExecutor; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{path::PathBuf, sync::Arc}; use tracing::*; /// `reth debug merkle` command @@ -69,11 +64,6 @@ impl Command { .network .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) .with_task_executor(Box::new(task_executor)) - .listener_addr(SocketAddr::new(self.network.addr, self.network.port)) - .discovery_addr(SocketAddr::new( - 
self.network.discovery.addr, - self.network.discovery.port, - )) .build(provider_factory) .start_network() .await?; @@ -150,16 +140,15 @@ impl Command { .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; trace!(target: "reth::cli", block_number, "Executing block"); - provider_rw.insert_block(sealed_block.clone(), None)?; + provider_rw.insert_block(sealed_block.clone())?; td += sealed_block.difficulty; - let mut executor = executor_provider.batch_executor( - StateProviderDatabase::new(LatestStateProviderRef::new( + let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( + LatestStateProviderRef::new( provider_rw.tx_ref(), provider_rw.static_file_provider().clone(), - )), - PruneModes::none(), - ); + ), + )); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; executor.finalize().write_to_storage( provider_rw.tx_ref(), @@ -167,7 +156,9 @@ impl Command { OriginalValuesKnown::Yes, )?; - let checkpoint = Some(StageCheckpoint::new(block_number - 1)); + let checkpoint = Some(StageCheckpoint::new( + block_number.checked_sub(1).ok_or(eyre::eyre!("GenesisBlockHasNoParent"))?, + )); let mut account_hashing_done = false; while !account_hashing_done { diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 7d8e179d2fb6..cc31c562f6ff 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -1,8 +1,4 @@ -use crate::{ - args::{get_secret_key, NetworkArgs}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, -}; +use crate::{args::NetworkArgs, macros::block_executor}; use clap::Parser; use eyre::Context; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; @@ -10,24 +6,26 @@ use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeacon use reth_blockchain_tree::{ BlockchainTree, 
BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::Config; use reth_consensus::Consensus; use reth_db::DatabaseEnv; +use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_fs_util as fs; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_core::engine::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, }; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::Pipeline; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_transaction_pool::noop::NoopTransactionPool; -use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; +use std::{path::PathBuf, sync::Arc, time::Duration}; use tokio::sync::oneshot; use tracing::*; @@ -65,11 +63,6 @@ impl Command { .network .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) .with_task_executor(Box::new(task_executor)) - .listener_addr(SocketAddr::new(self.network.addr, self.network.port)) - .discovery_addr(SocketAddr::new( - self.network.discovery.addr, - self.network.discovery.port, - )) .build(provider_factory) .start_network() .await?; @@ -90,7 +83,11 @@ impl Command { // Configure blockchain tree let tree_externals = TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; + let tree = BlockchainTree::new( + tree_externals, + BlockchainTreeConfig::default(), + PruneModes::none(), + )?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // Set up the blockchain 
provider @@ -149,13 +146,12 @@ impl Command { network_client, Pipeline::builder().build( provider_factory.clone(), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + StaticFileProducer::new(provider_factory.clone(), PruneModes::none()), ), blockchain_db.clone(), Box::new(ctx.task_executor.clone()), Box::new(network), None, - false, payload_builder, None, u64::MAX, diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 156e8f4d23ca..f4810f05148b 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -1,13 +1,9 @@ //! Command that initializes the node by importing a chain from a file. - -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, - version::SHORT_VERSION, -}; +use crate::{macros::block_executor, version::SHORT_VERSION}; use clap::Parser; use futures::{Stream, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_config::Config; use reth_consensus::Consensus; use reth_db::tables; @@ -24,10 +20,10 @@ use reth_network_p2p::{ use reth_node_events::node::NodeEvent; use reth_primitives::B256; use reth_provider::{ - BlockNumReader, ChainSpecProvider, HeaderProvider, HeaderSyncMode, ProviderError, - ProviderFactory, StageCheckpointReader, + BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderError, ProviderFactory, + StageCheckpointReader, }; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::{prelude::*, Pipeline, StageId, StageSet}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -100,8 +96,7 @@ impl ImportCommand { Arc::new(file_client), StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), self.no_state, - ) - .await?; + )?; // override the tip pipeline.set_tip(tip); @@ -157,7 +152,7 @@ impl ImportCommand { /// /// If configured to execute, all 
stages will run. Otherwise, only stages that don't require state /// will run. -pub async fn build_import_pipeline( +pub fn build_import_pipeline( config: &Config, provider_factory: ProviderFactory, consensus: &Arc, @@ -208,7 +203,7 @@ where .add_stages( DefaultStages::new( provider_factory.clone(), - HeaderSyncMode::Tip(tip_rx), + tip_rx, consensus.clone(), header_downloader, body_downloader, @@ -237,7 +232,7 @@ mod tests { let args: ImportCommand = ImportCommand::parse_from(["reth", "--chain", chain, "."]); assert_eq!( Ok(args.env.chain.chain), - chain.parse::(), + chain.parse::(), "failed to parse chain {chain}" ); } diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index cd5a7e7ba6a3..cf1b79be59c5 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -1,20 +1,5 @@ //! This contains all of the `reth` commands -pub mod config_cmd; -pub mod db; pub mod debug_cmd; -pub mod dump_genesis; pub mod import; -pub mod import_op; -pub mod import_receipts_op; - -pub mod init_cmd; -pub mod init_state; - pub mod node; -pub mod p2p; -pub mod recover; -pub mod stage; -pub mod test_vectors; - -pub mod common; diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index f11cb24ce682..ee3c6da74223 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -1,16 +1,17 @@ //! 
Main node command for launching a node use crate::args::{ - utils::{chain_help, genesis_value_parser, parse_socket_address, SUPPORTED_CHAINS}, + utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }; use clap::{value_parser, Args, Parser}; +use reth_chainspec::ChainSpec; use reth_cli_runner::CliContext; +use reth_cli_util::parse_socket_address; use reth_db::{init_db, DatabaseEnv}; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{node_config::NodeConfig, version}; -use reth_primitives::ChainSpec; use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc}; /// Start the node @@ -29,7 +30,7 @@ pub struct NodeCommand { long_help = chain_help(), default_value = SUPPORTED_CHAINS[0], default_value_if("dev", "true", "dev"), - value_parser = genesis_value_parser, + value_parser = chain_value_parser, required = false, )] pub chain: Arc, @@ -213,7 +214,7 @@ mod tests { fn parse_common_node_command_chain_args() { for chain in SUPPORTED_CHAINS { let args: NodeCommand = NodeCommand::::parse_from(["reth", "--chain", chain]); - assert_eq!(args.chain.chain, chain.parse::().unwrap()); + assert_eq!(args.chain.chain, chain.parse::().unwrap()); } } @@ -305,7 +306,7 @@ mod tests { #[cfg(not(feature = "optimism"))] // dev mode not yet supported in op-reth fn parse_dev() { let cmd = NodeCommand::::parse_from(["reth", "--dev"]); - let chain = reth_primitives::DEV.clone(); + let chain = reth_chainspec::DEV.clone(); assert_eq!(cmd.chain.chain, chain.chain); assert_eq!(cmd.chain.genesis_hash, chain.genesis_hash); assert_eq!( diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 7c024438ae27..c725b033b2c0 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -32,7 +32,15 @@ pub mod cli; pub mod commands; mod macros; -pub mod utils; + +/// Re-exported utils. 
+pub mod utils { + pub use reth_db::open_db_read_only; + + /// Re-exported from `reth_node_core`, also to prevent a breaking change. See the comment + /// on the `reth_node_core::args` re-export for more details. + pub use reth_node_core::utils::*; +} /// Re-exported payload related types pub mod payload { @@ -140,6 +148,15 @@ pub mod rpc { pub use reth_rpc_types::*; } + /// Re-exported from `reth_rpc_server_types`. + pub mod server_types { + pub use reth_rpc_server_types::*; + /// Re-exported from `reth_rpc_eth_types`. + pub mod eth { + pub use reth_rpc_eth_types::*; + } + } + /// Re-exported from `reth_rpc_api`. pub mod api { pub use reth_rpc_api::*; @@ -151,10 +168,10 @@ pub mod rpc { /// Re-exported from `reth_rpc::rpc`. pub mod result { - pub use reth_rpc::result::*; + pub use reth_rpc_server_types::result::*; } - /// Re-exported from `reth_rpc::eth`. + /// Re-exported from `reth_rpc_types_compat`. pub mod compat { pub use reth_rpc_types_compat::*; } diff --git a/book/SUMMARY.md b/book/SUMMARY.md index 473a389dae6a..af03aa32a849 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -61,8 +61,6 @@ - [`reth p2p`](./cli/reth/p2p.md) - [`reth p2p header`](./cli/reth/p2p/header.md) - [`reth p2p body`](./cli/reth/p2p/body.md) - - [`reth test-vectors`](./cli/reth/test-vectors.md) - - [`reth test-vectors tables`](./cli/reth/test-vectors/tables.md) - [`reth config`](./cli/reth/config.md) - [`reth debug`](./cli/reth/debug.md) - [`reth debug execution`](./cli/reth/debug/execution.md) @@ -72,5 +70,11 @@ - [`reth debug replay-engine`](./cli/reth/debug/replay-engine.md) - [`reth recover`](./cli/reth/recover.md) - [`reth recover storage-tries`](./cli/reth/recover/storage-tries.md) + - [`reth prune`](./cli/reth/prune.md) - [Developers](./developers/developers.md) + - [Execution Extensions](./developers/exex/exex.md) + - [How do ExExes work?](./developers/exex/how-it-works.md) + - [Hello World](./developers/exex/hello-world.md) + - [Tracking 
State](./developers/exex/tracking-state.md) + - [Remote](./developers/exex/remote.md) - [Contribute](./developers/contribute.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md index 089de1b65a67..5f02f1e9ee04 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -32,8 +32,6 @@ - [`reth p2p`](./reth/p2p.md) - [`reth p2p header`](./reth/p2p/header.md) - [`reth p2p body`](./reth/p2p/body.md) - - [`reth test-vectors`](./reth/test-vectors.md) - - [`reth test-vectors tables`](./reth/test-vectors/tables.md) - [`reth config`](./reth/config.md) - [`reth debug`](./reth/debug.md) - [`reth debug execution`](./reth/debug/execution.md) @@ -43,4 +41,5 @@ - [`reth debug replay-engine`](./reth/debug/replay-engine.md) - [`reth recover`](./reth/recover.md) - [`reth recover storage-tries`](./reth/recover/storage-tries.md) + - [`reth prune`](./reth/prune.md) diff --git a/book/cli/reth.md b/book/cli/reth.md index a4ba8f3d3d9c..cebeb44e2378 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -15,10 +15,10 @@ Commands: db Database debugging utilities stage Manipulate individual stages p2p P2P Debugging utilities - test-vectors Generate Test Vectors config Write config to stdout debug Various debug routines recover Scripts for node recovery + prune Prune according to the configuration without any limits help Print this message or the help of the given subcommand(s) Options: @@ -27,7 +27,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/config.md b/book/cli/reth/config.md index 1b2a89c665de..df0d261b07b1 100644 --- a/book/cli/reth/config.md +++ b/book/cli/reth/config.md @@ -18,7 +18,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. 
Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index b884b7d0f0b3..b867134a9d33 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -56,7 +56,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/debug.md b/book/cli/reth/debug.md index 2779b8d770ac..d61094834d39 100644 --- a/book/cli/reth/debug.md +++ b/book/cli/reth/debug.md @@ -20,7 +20,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/dump-genesis.md b/book/cli/reth/dump-genesis.md index 5add92402137..7197be305f26 100644 --- a/book/cli/reth/dump-genesis.md +++ b/book/cli/reth/dump-genesis.md @@ -12,7 +12,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 5a139e348cf4..29a67f181764 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index b1802b253a06..d947baec376d 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. 
Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index 8fe3fe018c0d..5eb9d4d03ba4 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index cd07d0692f58..61759a694e99 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -15,7 +15,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] @@ -313,6 +313,16 @@ RPC: [default: 50000000] + --rpc.eth-proof-window + The maximum proof window for historical proof generation. This value allows for generating historical proofs up to configured number of blocks from current tip (up to `tip - window`) + + [default: 0] + + --rpc.proof-permits + Maximum number of concurrent getproof requests + + [default: 25] + RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache @@ -447,11 +457,6 @@ Builder: [default: 3] Debug: - --debug.continuous - Prompt the downloader to download blocks one at a time. - - NOTE: This is for testing purposes only. - --debug.terminate Flag indicating whether the node should be terminated after the pipeline sync diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index ada874d8bfa6..0177244a3a73 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -20,7 +20,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. 
Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md new file mode 100644 index 000000000000..0b3e701f6b30 --- /dev/null +++ b/book/cli/reth/prune.md @@ -0,0 +1,146 @@ +# reth prune + +Prune according to the configuration without any limits + +```bash +$ reth prune --help +Usage: reth prune [OPTIONS] + +Options: + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static_files + The absolute path to store static files in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, dev + + [default: mainnet] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/recover.md b/book/cli/reth/recover.md index 9ffd8eb70f57..4fe28211db0b 100644 --- a/book/cli/reth/recover.md +++ b/book/cli/reth/recover.md @@ -16,7 +16,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index 649580382b11..d5df358a711d 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage.md b/book/cli/reth/stage.md index 17a888b6ecde..c9ff302c1aa0 100644 --- a/book/cli/reth/stage.md +++ b/book/cli/reth/stage.md @@ -19,7 +19,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. 
Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index dc2f1330bb05..b700519e1a87 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index f08b9ffd81c6..a5fd3052c0b6 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -51,7 +51,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index a98a2be6dab6..4fa8e0a38b23 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index a1a538f3b1dc..b9765bd8db18 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -49,7 +49,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. 
Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] @@ -204,6 +204,9 @@ Networking: [default: 131072] + --offline + If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/test-vectors.md b/book/cli/reth/test-vectors.md index da1b3c933f62..844c5ed8455a 100644 --- a/book/cli/reth/test-vectors.md +++ b/book/cli/reth/test-vectors.md @@ -16,7 +16,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/test-vectors/tables.md b/book/cli/reth/test-vectors/tables.md index 3b8f52f2c211..2a3023817b35 100644 --- a/book/cli/reth/test-vectors/tables.md +++ b/book/cli/reth/test-vectors/tables.md @@ -16,7 +16,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/developers/developers.md b/book/developers/developers.md index e5bf7cde90de..9d8c5a9c6739 100644 --- a/book/developers/developers.md +++ b/book/developers/developers.md @@ -1,3 +1,3 @@ # Developers -Reth is composed of several crates that can be used in standalone projects. If you are interested in using one or more of the crates, you can get an overview of them in the [developer docs](https://github.com/paradigmxyz/reth/tree/main/docs), or take a look at the [crate docs](https://paradigmxyz.github.io/reth/docs). \ No newline at end of file +Reth is composed of several crates that can be used in standalone projects. 
If you are interested in using one or more of the crates, you can get an overview of them in the [developer docs](https://github.com/paradigmxyz/reth/tree/main/docs), or take a look at the [crate docs](https://paradigmxyz.github.io/reth/docs). diff --git a/book/developers/exex/assets/remote_exex.png b/book/developers/exex/assets/remote_exex.png new file mode 100644 index 000000000000..8606616e8113 Binary files /dev/null and b/book/developers/exex/assets/remote_exex.png differ diff --git a/book/developers/exex/exex.md b/book/developers/exex/exex.md new file mode 100644 index 000000000000..b65d3173677b --- /dev/null +++ b/book/developers/exex/exex.md @@ -0,0 +1,30 @@ +# Execution Extensions (ExEx) + +## What are Execution Extensions? + +Execution Extensions (or ExExes, for short) allow developers to build their own infrastructure that relies on Reth +as a base for driving the chain (be it [Ethereum](../../run/mainnet.md) or [OP Stack](../../run/optimism.md)) forward. + +An Execution Extension is a task that derives its state from changes in Reth's state. +Some examples of such state derivations are rollups, bridges, and indexers. + +They are called Execution Extensions because the main trigger for them is the execution of new blocks (or reorgs of old blocks) +initiated by Reth. + +Read more about things you can build with Execution Extensions in the [Paradigm blog](https://www.paradigm.xyz/2024/05/reth-exex). + +## What Execution Extensions are not + +Execution Extensions are not separate processes that connect to the main Reth node process. +Instead, ExExes are compiled into the same binary as Reth, and run alongside it, using shared memory for communication. + +If you want to build an Execution Extension that sends data into a separate process, check out the [Remote](./remote.md) chapter. + +## How do I build an Execution Extension? + +Let's dive into how to build our own ExEx from scratch, add tests for it, +and run it on the Holesky testnet. + +1. 
[How do ExExes work?](./how-it-works.md) +1. [Hello World](./hello-world.md) +1. [Tracking State](./tracking-state.md) diff --git a/book/developers/exex/hello-world.md b/book/developers/exex/hello-world.md new file mode 100644 index 000000000000..0f50cacbb9a6 --- /dev/null +++ b/book/developers/exex/hello-world.md @@ -0,0 +1,166 @@ +# Hello World + +Let's write a simple "Hello World" ExEx that emits a log every time a new chain of blocks is committed, reverted, or reorged. + +### Create a project + +First, let's create a new project for our ExEx + +```console +cargo new --bin my-exex +cd my-exex +``` + +And add Reth as a dependency in `Cargo.toml` + +```toml +[package] +name = "my-exex" +version = "0.1.0" +edition = "2021" + +[dependencies] +reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth +reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } # Ethereum Node implementation +reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging +eyre = "0.6" # Easy error handling +``` + +### Default Reth node + +Now, let's jump to our `main.rs` and start by initializing and launching a default Reth node + +```rust,norun,noplayground,ignore +use reth_node_ethereum::EthereumNode; + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder.node(EthereumNode::default()).launch().await?; + + handle.wait_for_node_exit().await + }) +} +``` + +You can already test that it works by running the binary and initializing the Holesky node in a custom datadir +(to not interfere with any instances of Reth you already have on your machine): + +```console +$ cargo run -- init --chain holesky --datadir data + +2024-06-12T16:48:06.420296Z INFO reth init starting +2024-06-12T16:48:06.422380Z INFO Opening storage db_path="data/db" sf_path="data/static_files" +2024-06-12T16:48:06.432939Z INFO Verifying 
storage consistency. +2024-06-12T16:48:06.577673Z INFO Genesis block written hash=0xb5f7f912443c940f21fd611f12828d75b53 +4364ed9e95ca4e307729a4661bde4 +``` + +### Simplest ExEx + +The simplest ExEx is just an async function that never returns. We need to install it into our node + +```rust,norun,noplayground,ignore +use reth::api::FullNodeComponents; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; + +async fn my_exex<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::Result<()> { + loop {} +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("my-exex", |ctx| async move { Ok(my_exex(ctx)) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} +``` + +See that unused `ctx`? That's the context that we'll use to listen to new notifications coming from the main node, +and send events back to it. It also contains all components that the node exposes to the ExEx. + +Currently, our ExEx does absolutely nothing by running an infinite loop in an async function that never returns. + +
+ +It's important that the future returned by the ExEx (`my_exex`) never resolves. + +If you try running a node with an ExEx that exits, the node will exit as well. + +
+ +### Hello World ExEx + +Now, let's extend our simplest ExEx and start actually listening to new notifications, log them, and send events back to the main node + +```rust,norun,noplayground,ignore +use reth::api::FullNodeComponents; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; + +async fn my_exex(mut ctx: ExExContext) -> eyre::Result<()> { + while let Some(notification) = ctx.notifications.recv().await { + match ¬ification { + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); + } + ExExNotification::ChainReorged { old, new } => { + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + info!(reverted_chain = ?old.range(), "Received revert"); + } + }; + + if let Some(committed_chain) = notification.committed_chain() { + ctx.events + .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + } + + Ok(()) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("my-exex", |ctx| async move { Ok(my_exex(ctx)) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} +``` + +Woah, there's a lot of new stuff here! Let's go through it step by step: + +- First, we've added a `while let Some(notification) = ctx.notifications.recv().await` loop that waits for new notifications to come in. + - The main node is responsible for sending notifications to the ExEx, so we're waiting for them to come in. +- Next, we've added a `match ¬ification { ... }` block that matches on the type of the notification. + - In each case, we're logging the notification and the corresponding block range, be it a chain commit, revert, or reorg. 
+- Finally, we're checking if the notification contains a committed chain, and if it does, we're sending a `ExExEvent::FinishedHeight` event back to the main node using the `ctx.events.send` method. + +
+ +Sending an `ExExEvent::FinishedHeight` event is a very important part of every ExEx. + +It's the only way to communicate to the main node that the ExEx has finished processing the specified height +and it's safe to prune the associated data. + +
+ +What we've arrived at is the [minimal ExEx example](https://github.com/paradigmxyz/reth/blob/b8cd7be6c92a71aea5341cdeba685f124c6de540/examples/exex/minimal/src/main.rs) that we provide in the Reth repository. + +## What's next? + +Let's do something a bit more interesting, and see how you can [keep track of some state](./tracking-state.md) inside your ExEx. diff --git a/book/developers/exex/how-it-works.md b/book/developers/exex/how-it-works.md new file mode 100644 index 000000000000..7fd179bf9155 --- /dev/null +++ b/book/developers/exex/how-it-works.md @@ -0,0 +1,26 @@ +# How do ExExes work? + +ExExes are just [Futures](https://doc.rust-lang.org/std/future/trait.Future.html) that run indefinitely alongside Reth +– as simple as that. + +An ExEx is usually driven by and acts on new notifications about chain commits, reverts, and reorgs, but it can span beyond that. + +They are installed into the node by using the [node builder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html). +Reth manages the lifecycle of all ExExes, including: +- Polling ExEx futures +- Sending [notifications](https://reth.rs/docs/reth_exex/enum.ExExNotification.html) about new chain, reverts, + and reorgs from historical and live sync +- Processing [events](https://reth.rs/docs/reth_exex/enum.ExExEvent.html) emitted by ExExes +- Pruning (in case of a full or pruned node) only the data that have been processed by all ExExes +- Shutting ExExes down when the node is shut down + +## Pruning + +Pruning deserves a special mention here. + +ExExes **SHOULD** emit an [`ExExEvent::FinishedHeight`](https://reth.rs/docs/reth_exex/enum.ExExEvent.html#variant.FinishedHeight) +event to signify what blocks have been processed. This event is used by Reth to determine what state can be pruned. + +An ExEx will only receive notifications for block numbers greater than the block in the most recently emitted `FinishedHeight` event. 
+ +To clarify: if an ExEx emits `ExExEvent::FinishedHeight(0)` it will receive notifications for any `block_number > 0`. diff --git a/book/developers/exex/remote.md b/book/developers/exex/remote.md new file mode 100644 index 000000000000..e0caa72f62d3 --- /dev/null +++ b/book/developers/exex/remote.md @@ -0,0 +1,490 @@ +# Remote Execution Extensions + +In this chapter, we will learn how to create an ExEx that emits all notifications to an external process. + +We will use [Tonic](https://github.com/hyperium/tonic) to create a gRPC server and a client. +- The server binary will have the Reth client, our ExEx and the gRPC server. +- The client binary will have the gRPC client that connects to the server. + +## Prerequisites + +See [section](https://github.com/hyperium/tonic?tab=readme-ov-file#dependencies) of the Tonic documentation +to install the required dependencies. + +## Create a new project + +Let's create a new project. Don't forget to provide the `--lib` flag to `cargo new`, +because we will have two custom binaries in this project that we will create manually. + +```console +$ cargo new --lib exex-remote +$ cd exex-remote +``` + +We will also need a bunch of dependencies. Some of them you know from the [Hello World](./hello-world.md) chapter, +but some are specific to what we need now. 
+ +```toml +[package] +name = "remote-exex" +version = "0.1.0" +edition = "2021" + +[dependencies] +# reth +reth = { git = "https://github.com/paradigmxyz/reth.git" } +reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git"} +reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } + +# async +tokio = { version = "1", features = ["full"] } +tokio-stream = "0.1" +futures-util = "0.3" + +# grpc +tonic = "0.11" +prost = "0.12" +bincode = "1" + +# misc +eyre = "0.6" + +[build-dependencies] +tonic-build = "0.11" + +[[bin]] +name = "exex" +path = "src/exex.rs" + +[[bin]] +name = "consumer" +path = "src/consumer.rs" +``` + +We also added a build dependency for Tonic. We will use it to generate the Rust code for our +Protobuf definitions at compile time. Read more about using Tonic in the +[introductory tutorial](https://github.com/hyperium/tonic/blob/6a213e9485965db0628591e30577ed81cdaeaf2b/examples/helloworld-tutorial.md). + +Also, we now have two separate binaries: +- `exex` is the server binary that will run the ExEx and the gRPC server. +- `consumer` is the client binary that will connect to the server and receive notifications. + +### Create the Protobuf definitions + +In the root directory of your project (not `src`), create a new directory called `proto` and a file called `exex.proto`. + +We define a service called `RemoteExEx` that exposes a single method called `Subscribe`. +This method streams notifications to the client. + +
+ +A proper way to represent the notification would be to define all fields in the schema, but it goes beyond the scope +of this chapter. + +For an example of a full schema, see the [Remote ExEx](https://github.com/paradigmxyz/reth-exex-grpc/blob/22b26f7beca1c74577d28be3b3838eb352747be0/proto/exex.proto) example. + +
+ +```protobuf +syntax = "proto3"; + +package exex; + +service RemoteExEx { + rpc Subscribe(SubscribeRequest) returns (stream ExExNotification) {} +} + +message SubscribeRequest {} + +message ExExNotification { + bytes data = 1; +} +``` + +To instruct Tonic to generate the Rust code using this `.proto`, add the following lines to your `lib.rs` file: +```rust,norun,noplayground,ignore +pub mod proto { + tonic::include_proto!("exex"); +} +``` + +## ExEx and gRPC server + +We will now create the ExEx and the gRPC server in our `src/exex.rs` file. + +### gRPC server + +Let's create a minimal gRPC server that listens on the port `:10000`, and spawn it using +the [NodeBuilder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html)'s [task executor](https://reth.rs/docs/reth/tasks/struct.TaskExecutor.html). + +```rust,norun,noplayground,ignore +use remote_exex::proto::{ + self, + remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, +}; +use reth_exex::ExExNotification; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; +use std::sync::Arc; +use tokio::sync::{broadcast, mpsc}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{transport::Server, Request, Response, Status}; + +struct ExExService {} + +#[tonic::async_trait] +impl RemoteExEx for ExExService { + type SubscribeStream = ReceiverStream>; + + async fn subscribe( + &self, + _request: Request, + ) -> Result, Status> { + let (_tx, rx) = mpsc::channel(1); + + Ok(Response::new(ReceiverStream::new(rx))) + } +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService {})) + .serve("[::1]:10000".parse().unwrap()); + + let handle = builder.node(EthereumNode::default()).launch().await?; + + handle + .node + .task_executor + .spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} 
+``` + +Currently, it does not send anything on the stream. +We need to create a communication channel between our future ExEx and this gRPC server +to send new `ExExNotification` on it. + +Let's create this channel in the `main` function where we will have both gRPC server and ExEx initiated, +and save the sender part (that way we will be able to create new receivers) of this channel in our gRPC server. + +```rust,norun,noplayground,ignore +// ... +use reth_exex::{ExExNotification}; + +struct ExExService { + notifications: Arc>, +} + +... + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let notifications = Arc::new(broadcast::channel(1).0); + + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService { + notifications: notifications.clone(), + })) + .serve("[::1]:10000".parse().unwrap()); + + let handle = builder + .node(EthereumNode::default()) + .launch() + .await?; + + handle + .node + .task_executor + .spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} +``` + +And with that, we're ready to handle incoming notifications, serialize them with [bincode](https://docs.rs/bincode/) +and send back to the client. + +For each incoming request, we spawn a separate tokio task that will run in the background, +and then return the stream receiver to the client. + +```rust,norun,noplayground,ignore +// ... 
+ +#[tonic::async_trait] +impl RemoteExEx for ExExService { + type SubscribeStream = ReceiverStream>; + + async fn subscribe( + &self, + _request: Request, + ) -> Result, Status> { + let (tx, rx) = mpsc::channel(1); + + let mut notifications = self.notifications.subscribe(); + tokio::spawn(async move { + while let Ok(notification) = notifications.recv().await { + let proto_notification = proto::ExExNotification { + data: bincode::serialize(¬ification).expect("failed to serialize"), + }; + tx.send(Ok(proto_notification)) + .await + .expect("failed to send notification to client"); + + info!("Notification sent to the gRPC client"); + } + }); + + Ok(Response::new(ReceiverStream::new(rx))) + } +} + +// ... +``` + +That's it for the gRPC server part! It doesn't receive anything on the `notifications` channel yet, +but we will fix it with our ExEx. + +### ExEx + +Now, let's define the ExEx part of our binary. + +Our ExEx accepts a `notifications` channel and redirects all incoming `ExExNotification`s to it. + +
+ +Don't forget to emit `ExExEvent::FinishedHeight` + +
+ +```rust,norun,noplayground,ignore +// ... +use reth_exex::{ExExContext, ExExEvent}; + +async fn remote_exex( + mut ctx: ExExContext, + notifications: Arc>, +) -> eyre::Result<()> { + while let Some(notification) = ctx.notifications.recv().await { + if let Some(committed_chain) = notification.committed_chain() { + ctx.events + .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + + info!("Notification sent to the gRPC server"); + let _ = notifications.send(notification); + } + + Ok(()) +} + +// ... +``` + +All that's left is to connect all pieces together: install our ExEx in the node and pass the sender part +of communication channel to it. + +```rust,norun,noplayground,ignore +// ... + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let notifications = Arc::new(broadcast::channel(1).0); + + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService { + notifications: notifications.clone(), + })) + .serve("[::1]:10000".parse().unwrap()); + + let handle = builder + .node(EthereumNode::default()) + .install_exex("remote-exex", |ctx| async move { + Ok(remote_exex(ctx, notifications)) + }) + .launch() + .await?; + + handle + .node + .task_executor + .spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} +``` + +### Full `exex.rs` code + +
+Click to expand + +```rust,norun,noplayground,ignore +use remote_exex::proto::{ + self, + remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, +}; +use reth::api::FullNodeComponents; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; +use std::sync::Arc; +use tokio::sync::{broadcast, mpsc}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{transport::Server, Request, Response, Status}; + +struct ExExService { + notifications: Arc>, +} + +#[tonic::async_trait] +impl RemoteExEx for ExExService { + type SubscribeStream = ReceiverStream>; + + async fn subscribe( + &self, + _request: Request, + ) -> Result, Status> { + let (tx, rx) = mpsc::channel(1); + + let mut notifications = self.notifications.subscribe(); + tokio::spawn(async move { + while let Ok(notification) = notifications.recv().await { + let proto_notification = proto::ExExNotification { + data: bincode::serialize(¬ification).expect("failed to serialize"), + }; + tx.send(Ok(proto_notification)) + .await + .expect("failed to send notification to client"); + + info!(?notification, "Notification sent to the gRPC client"); + } + }); + + Ok(Response::new(ReceiverStream::new(rx))) + } +} + +async fn remote_exex( + mut ctx: ExExContext, + notifications: Arc>, +) -> eyre::Result<()> { + while let Some(notification) = ctx.notifications.recv().await { + if let Some(committed_chain) = notification.committed_chain() { + ctx.events + .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + + info!(?notification, "Notification sent to the gRPC server"); + let _ = notifications.send(notification); + } + + Ok(()) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let notifications = Arc::new(broadcast::channel(1).0); + + let server = Server::builder() + .add_service(RemoteExExServer::new(ExExService { + notifications: notifications.clone(), + })) + 
.serve("[::1]:10000".parse().unwrap()); + + let handle = builder + .node(EthereumNode::default()) + .install_exex("remote-exex", |ctx| async move { + Ok(remote_exex(ctx, notifications)) + }) + .launch() + .await?; + + handle + .node + .task_executor + .spawn_critical("gRPC server", async move { + server.await.expect("failed to start gRPC server") + }); + + handle.wait_for_node_exit().await + }) +} +``` +
+ +## Consumer + +Consumer will be a much simpler binary that just connects to our gRPC server and prints out all the notifications +it receives. + +
+ +We need to increase maximum message encoding and decoding sizes to `usize::MAX`, +because notifications can get very heavy + +
+
+```rust,norun,noplayground,ignore
+use remote_exex::proto::{remote_ex_ex_client::RemoteExExClient, SubscribeRequest};
+use reth_exex::ExExNotification;
+use reth_tracing::{tracing::info, RethTracer, Tracer};
+
+#[tokio::main]
+async fn main() -> eyre::Result<()> {
+    let _ = RethTracer::new().init()?;
+
+    let mut client = RemoteExExClient::connect("http://[::1]:10000")
+        .await?
+        .max_encoding_message_size(usize::MAX)
+        .max_decoding_message_size(usize::MAX);
+
+    let mut stream = client.subscribe(SubscribeRequest {}).await?.into_inner();
+    while let Some(notification) = stream.message().await? {
+        let notification: ExExNotification = bincode::deserialize(&notification.data)?;
+
+        match notification {
+            ExExNotification::ChainCommitted { new } => {
+                info!(committed_chain = ?new.range(), "Received commit");
+            }
+            ExExNotification::ChainReorged { old, new } => {
+                info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
+            }
+            ExExNotification::ChainReverted { old } => {
+                info!(reverted_chain = ?old.range(), "Received revert");
+            }
+        };
+    }
+
+    Ok(())
+}
+```
+
+## Running
+
+In one terminal window, we will run our ExEx and gRPC server. It will start syncing Reth on the Holesky chain
+and use Etherscan in place of a real Consensus Client.
+
+```console
+cargo run --bin exex --release -- node --chain holesky --debug.etherscan
+```
+
+And in the other, we will run our consumer:
+
+```console
+cargo run --bin consumer --release
+```
+
diff --git a/book/developers/exex/tracking-state.md b/book/developers/exex/tracking-state.md
new file mode 100644
index 000000000000..5fe8b1c9ef83
--- /dev/null
+++ b/book/developers/exex/tracking-state.md
@@ -0,0 +1,193 @@
+# Tracking State
+
+In this chapter, we'll learn how to keep track of some state inside our ExEx.
+
+Let's continue with our Hello World example from the [previous chapter](./hello-world.md).
+
+### Turning ExEx into a struct
+
+First, we need to turn our ExEx into a stateful struct.
+ +Before, we had just an async function, but now we'll need to implement +the [`Future`](https://doc.rust-lang.org/std/future/trait.Future.html) trait manually. + +
+ +Having a stateful async function is also possible, but it makes testing harder, +because you can't access variables inside the function to assert the state of your ExEx. + +
+
+```rust,norun,noplayground,ignore
+use std::{
+    future::Future,
+    pin::Pin,
+    task::{ready, Context, Poll},
+};
+
+use reth::api::FullNodeComponents;
+use reth_exex::{ExExContext, ExExEvent, ExExNotification};
+use reth_node_ethereum::EthereumNode;
+use reth_tracing::tracing::info;
+
+struct MyExEx<Node: FullNodeComponents> {
+    ctx: ExExContext<Node>,
+}
+
+impl<Node: FullNodeComponents> Future for MyExEx<Node> {
+    type Output = eyre::Result<()>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.get_mut();
+
+        while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) {
+            match &notification {
+                ExExNotification::ChainCommitted { new } => {
+                    info!(committed_chain = ?new.range(), "Received commit");
+                }
+                ExExNotification::ChainReorged { old, new } => {
+                    info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
+                }
+                ExExNotification::ChainReverted { old } => {
+                    info!(reverted_chain = ?old.range(), "Received revert");
+                }
+            };
+
+            if let Some(committed_chain) = notification.committed_chain() {
+                this.ctx
+                    .events
+                    .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+            }
+        }
+
+        Poll::Ready(Ok(()))
+    }
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("my-exex", |ctx| async move { Ok(MyExEx { ctx }) })
+            .launch()
+            .await?;
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+For those who are not familiar with how async Rust works on a lower level, that may seem scary,
+but let's unpack what's going on here:
+
+1. Our ExEx is now a `struct` that contains the context and implements the `Future` trait. It's now pollable (hence `await`-able).
+1. We can't use `self` directly inside our `poll` method, and instead need to acquire a mutable reference to the data inside of the `Pin`.
+   Read more about pinning in [the book](https://rust-lang.github.io/async-book/04_pinning/01_chapter.html).
+1. We also can't use `await` directly inside `poll`, and instead need to poll futures manually.
+   We wrap the call to `poll_recv(cx)` into a [`ready!`](https://doc.rust-lang.org/std/task/macro.ready.html) macro,
+   so that if the channel of notifications has no value ready, we will instantly return `Poll::Pending` from our Future.
+1. We initialize and return the `MyExEx` struct directly in the `install_exex` method, because it's a Future.
+
+With all that done, we're now free to add more fields to our `MyExEx` struct, and track some state in them.
+
+### Adding state
+
+Our ExEx will count the number of transactions in each block and log it to the console.
+
+```rust,norun,noplayground,ignore
+use std::{
+    future::Future,
+    pin::Pin,
+    task::{ready, Context, Poll},
+};
+
+use reth::{api::FullNodeComponents, primitives::BlockNumber};
+use reth_exex::{ExExContext, ExExEvent};
+use reth_node_ethereum::EthereumNode;
+use reth_tracing::tracing::info;
+
+struct MyExEx<Node: FullNodeComponents> {
+    ctx: ExExContext<Node>,
+    /// First block that was committed since the start of the ExEx.
+    first_block: Option<BlockNumber>,
+    /// Total number of transactions committed.
+    transactions: u64,
+}
+
+impl<Node: FullNodeComponents> MyExEx<Node> {
+    fn new(ctx: ExExContext<Node>) -> Self {
+        Self {
+            ctx,
+            first_block: None,
+            transactions: 0,
+        }
+    }
+}
+
+impl<Node: FullNodeComponents> Future for MyExEx<Node> {
+    type Output = eyre::Result<()>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.get_mut();
+
+        while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) {
+            if let Some(reverted_chain) = notification.reverted_chain() {
+                this.transactions = this.transactions.saturating_sub(
+                    reverted_chain
+                        .blocks_iter()
+                        .map(|b| b.body.len() as u64)
+                        .sum(),
+                );
+            }
+
+            if let Some(committed_chain) = notification.committed_chain() {
+                this.first_block.get_or_insert(committed_chain.first().number);
+
+                this.transactions += committed_chain
+                    .blocks_iter()
+                    .map(|b| b.body.len() as u64)
+                    .sum::<u64>();
+
+                this.ctx
+                    .events
+                    .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+            }
+
+            if let Some(first_block) = this.first_block {
+                info!(%first_block, transactions = %this.transactions, "Total number of transactions");
+            }
+        }
+
+        Poll::Ready(Ok(()))
+    }
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("my-exex", |ctx| async move { Ok(MyExEx::new(ctx)) })
+            .launch()
+            .await?;
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+As you can see, we added two fields to our ExEx struct:
+- `first_block` to keep track of the first block that was committed since the start of the ExEx.
+- `transactions` to keep track of the total number of transactions committed, accounting for reorgs and reverts.
+
+We also changed our `match` block to two `if` clauses:
+- First one checks if there's a reverted chain using `notification.reverted_chain()`. If there is:
+  - We subtract the number of transactions in the reverted chain from the total number of transactions.
+ - It's important to do the `saturating_sub` here, because if we just started our node and + instantly received a reorg, our `transactions` field will still be zero. +- Second one checks if there's a committed chain using `notification.committed_chain()`. If there is: + - We update the `first_block` field to the first block of the committed chain. + - We add the number of transactions in the committed chain to the total number of transactions. + - We send a `FinishedHeight` event back to the main node. + +Finally, on every notification, we log the total number of transactions and +the first block that was committed since the start of the ExEx. diff --git a/book/developers/profiling.md b/book/developers/profiling.md index 884032b2ac88..f1fdf520eb2e 100644 --- a/book/developers/profiling.md +++ b/book/developers/profiling.md @@ -41,12 +41,12 @@ cargo build --features jemalloc-prof ``` When performing a longer-running or performance-sensitive task with reth, such as a sync test or load benchmark, it's usually recommended to use the `maxperf` profile. However, the `maxperf` -profile does not enable debug symbols, which are required for tools like `perf` and `jemalloc` to produce results that a human can interpret. Reth includes a performance profile with debug symbols called `debug-fast`. To compile reth with debug symbols, jemalloc, profiling, and a performance profile: +profile does not enable debug symbols, which are required for tools like `perf` and `jemalloc` to produce results that a human can interpret. Reth includes a performance profile with debug symbols called `profiling`. 
To compile reth with debug symbols, jemalloc, profiling, and a performance profile: ``` -cargo build --features jemalloc-prof --profile debug-fast +cargo build --features jemalloc-prof --profile profiling # May improve performance even more -RUSTFLAGS="-C target-cpu=native" cargo build --features jemalloc-prof --profile debug-fast +RUSTFLAGS="-C target-cpu=native" cargo build --features jemalloc-prof --profile profiling ``` ### Monitoring memory usage diff --git a/book/intro.md b/book/intro.md index 1a334fbb170e..077cfed30883 100644 --- a/book/intro.md +++ b/book/intro.md @@ -1,12 +1,11 @@ # Reth Book -_Documentation for Reth users and developers._ +_Documentation for Reth users and developers._ [![Telegram Chat][tg-badge]][tg-url] -Reth (short for Rust Ethereum, [pronunciation](https://twitter.com/kelvinfichter/status/1597653609411268608)) is an **Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient.** +Reth (short for Rust Ethereum, [pronunciation](https://twitter.com/kelvinfichter/status/1597653609411268608)) is an **Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient.** -Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime servi -ces. We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities. +Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime services. We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities. 
@@ -56,12 +55,12 @@ We want to solve for node operators that care about fast historical queries, but We also want to support teams and individuals who want both sync from genesis and via "fast sync". -We envision that Reth will be configurable enough for the tradeoffs that each team faces. +We envision that Reth will be configurable enough for the tradeoffs that each team faces. ## Who is this for? Reth is a new Ethereum full node that allows users to sync and interact with the entire blockchain, including its historical state if in archive mode. -- Full node: It can be used as a full node, which stores and processes the entire blockchain, validates blocks and transactions, and participates in the consensus process. +- Full node: It can be used as a full node, which stores and processes the entire blockchain, validates blocks and transactions, and participates in the consensus process. - Archive node: It can also be used as an archive node, which stores the entire history of the blockchain and is useful for applications that need access to historical data. As a data engineer/analyst, or as a data indexer, you'll want to use Archive mode. For all other use cases where historical access is not needed, you can use Full mode. @@ -76,7 +75,10 @@ Reth implements the specification of Ethereum as defined in the [ethereum/execut 1. We operate multiple nodes at the tip of Ethereum mainnet and various testnets. 1. We extensively unit test, fuzz test and document all our code, while also restricting PRs with aggressive lint rules. -We intend to also audit / fuzz the EVM & parts of the codebase. Please reach out if you're interested in collaborating on securing this codebase. +We have completed an audit of the [Reth v1.0.0-rc.2](https://github.com/paradigmxyz/reth/releases/tag/v1.0.0-rc.2) with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. 
Find it [here](https://github.com/paradigmxyz/reth/blob/main/audit/sigma_prime_audit_v2.pdf). + +[Revm](https://github.com/bluealloy/revm) (the EVM used in Reth) underwent an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon. + ## Sections diff --git a/book/run/optimism.md b/book/run/optimism.md index a44d0b603fc2..3e4c76b7c763 100644 --- a/book/run/optimism.md +++ b/book/run/optimism.md @@ -83,10 +83,12 @@ op-node \ --l2.jwt-secret=/path/to/jwt.hex \ --rpc.addr=0.0.0.0 \ --rpc.port=7000 \ - --l1.trustrpc \ --l1.beacon= + --syncmode=execution-layer ``` +Consider adding the `--l1.trustrpc` flag to improve performance, if the connection to l1 is over localhost. + If you opted to build the `op-node` with the `rethdb` build tag, this feature can be enabled by appending one extra flag to the `op-node` invocation: > Note, the `reth_db_path` is the path to the `db` folder inside of the reth datadir, not the `mdbx.dat` file itself. 
This can be fetched from `op-reth db path [--chain ]`, or if you are using a custom datadir location via the `--datadir` flag, @@ -101,7 +103,7 @@ op-node \ [l1-el-spec]: https://github.com/ethereum/execution-specs [rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md [op-geth-forkdiff]: https://op-geth.optimism.io -[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/introduction.md#sequencers +[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background.md#sequencers [op-stack-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs [l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md [deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index c85ff7d54786..3a987e52c73a 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -2,105 +2,96 @@ For those who need a private testnet to validate functionality or scale with Reth. ## Using Docker locally -This guide uses [Kurtosis' ethereum-package](https://github.com/kurtosis-tech/ethereum-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine. +This guide uses [Kurtosis' ethereum-package](https://github.com/ethpandaops/ethereum-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine. * Go [here](https://docs.kurtosis.com/install/) to install Kurtosis * Go [here](https://docs.docker.com/get-docker/) to install Docker -The [`ethereum-package`](https://github.com/kurtosis-tech/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. 
This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. +The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. -To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/kurtosis-tech/ethereum-package#configuration). +To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/ethpandaops/ethereum-package#configuration). -Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/). Read more about how the `ethereum-package` works by going [here](https://github.com/kurtosis-tech/ethereum-package/). +Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node. 
The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/). Read more about how the `ethereum-package` works by going [here](https://github.com/ethpandaops/ethereum-package/). ### Step 1: Define the parameters and shape of your private network -First, in your home directory, create a file with the name `network_params.json` with the following contents: -```json -{ - "participants": [ - { - "el_type": "reth", - "el_image": "ghcr.io/paradigmxyz/reth", - "cl_type": "lighthouse", - "cl_image": "sigp/lighthouse:latest", - "count": 1 - }, - { - "el_type": "reth", - "el_image": "ghcr.io/paradigmxyz/reth", - "cl_type": "teku", - "cl_image": "consensys/teku:latest", - "count": 1 - } - ], - "launch_additional_services": false -} +First, in your home directory, create a file with the name `network_params.yaml` with the following contents: +```yaml +participants: + - el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: lighthouse + cl_image: sigp/lighthouse:latest + - el_type: reth + el_image: ghcr.io/paradigmxyz/reth + cl_type: teku + cl_image: consensys/teku:latest ``` > [!TIP] -> If you would like to use a modified reth node, you can build an image locally with a custom tag. The tag can then be used in the `el_image` field in the `network_params.json` file. +> If you would like to use a modified reth node, you can build an image locally with a custom tag. The tag can then be used in the `el_image` field in the `network_params.yaml` file. 
### Step 2: Spin up your network Next, run the following command from your command line: ```bash -kurtosis run github.com/kurtosis-tech/ethereum-package --args-file ~/network_params.json +kurtosis run github.com/ethpandaops/ethereum-package --args-file ~/network_params.yaml --image-download always ``` Kurtosis will spin up an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/) (i.e an ephemeral, isolated environment) and begin to configure and instantiate the nodes in your network. In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output: ```console -INFO[2023-08-21T18:22:18-04:00] ==================================================== -INFO[2023-08-21T18:22:18-04:00] || Created enclave: silky-swamp || -INFO[2023-08-21T18:22:18-04:00] ==================================================== -Name: silky-swamp -UUID: 3df730c66123 +INFO[2024-07-09T12:01:35+02:00] ======================================================== +INFO[2024-07-09T12:01:35+02:00] || Created enclave: silent-mountain || +INFO[2024-07-09T12:01:35+02:00] ======================================================== +Name: silent-mountain +UUID: cb5d0a7d0e7c Status: RUNNING -Creation Time: Mon, 21 Aug 2023 18:21:32 EDT +Creation Time: Tue, 09 Jul 2024 12:00:03 CEST +Flags: ========================================= Files Artifacts ========================================= UUID Name -c168ec4468f6 1-lighthouse-reth-0-63 -61f821e2cfd5 2-teku-reth-64-127 -e6f94fdac1b8 cl-genesis-data -e6b57828d099 el-genesis-data -1fb632573a2e genesis-generation-config-cl -b8917e497980 genesis-generation-config-el -6fd8c5be336a geth-prefunded-keys -6ab83723b4bd prysm-password +414a075a37aa 1-lighthouse-reth-0-63-0 +34d0b9ff906b 2-teku-reth-64-127-0 +dffa1bcd1da1 el_cl_genesis_data +fdb202429b26 final-genesis-timestamp +da0d9d24b340 
genesis-el-cl-env-file +55c46a6555ad genesis_validators_root +ba79dbd109dd jwt_file +04948fd8b1e3 keymanager_file +538211b6b7d7 prysm-password +ed75fe7d5293 validator-ranges ========================================== User Services ========================================== -UUID Name Ports Status -95386198d3f9 cl-1-lighthouse-reth http: 4000/tcp -> http://127.0.0.1:64947 RUNNING - metrics: 5054/tcp -> http://127.0.0.1:64948 - tcp-discovery: 9000/tcp -> 127.0.0.1:64949 - udp-discovery: 9000/udp -> 127.0.0.1:60303 -5f5cc4cf639a cl-1-lighthouse-reth-validator http: 5042/tcp -> 127.0.0.1:64950 RUNNING - metrics: 5064/tcp -> http://127.0.0.1:64951 -27e1cfaddc72 cl-2-teku-reth http: 4000/tcp -> 127.0.0.1:64954 RUNNING - metrics: 8008/tcp -> 127.0.0.1:64952 - tcp-discovery: 9000/tcp -> 127.0.0.1:64953 - udp-discovery: 9000/udp -> 127.0.0.1:53749 -b454497fbec8 el-1-reth-lighthouse engine-rpc: 8551/tcp -> 127.0.0.1:64941 RUNNING - metrics: 9001/tcp -> 127.0.0.1:64937 - rpc: 8545/tcp -> 127.0.0.1:64939 - tcp-discovery: 30303/tcp -> 127.0.0.1:64938 - udp-discovery: 30303/udp -> 127.0.0.1:55861 - ws: 8546/tcp -> 127.0.0.1:64940 -03a2ef13c99b el-2-reth-teku engine-rpc: 8551/tcp -> 127.0.0.1:64945 RUNNING - metrics: 9001/tcp -> 127.0.0.1:64946 - rpc: 8545/tcp -> 127.0.0.1:64943 - tcp-discovery: 30303/tcp -> 127.0.0.1:64942 - udp-discovery: 30303/udp -> 127.0.0.1:64186 - ws: 8546/tcp -> 127.0.0.1:64944 -5c199b334236 prelaunch-data-generator-cl-genesis-data RUNNING -46829c4bd8b0 prelaunch-data-generator-el-genesis-data RUNNING +UUID Name Ports Status +0853f809c300 cl-1-lighthouse-reth http: 4000/tcp -> http://127.0.0.1:32811 RUNNING + metrics: 5054/tcp -> http://127.0.0.1:32812 + tcp-discovery: 9000/tcp -> 127.0.0.1:32813 + udp-discovery: 9000/udp -> 127.0.0.1:32776 +f81cd467efe3 cl-2-teku-reth http: 4000/tcp -> http://127.0.0.1:32814 RUNNING + metrics: 8008/tcp -> http://127.0.0.1:32815 + tcp-discovery: 9000/tcp -> 127.0.0.1:32816 + udp-discovery: 9000/udp -> 127.0.0.1:32777 
+f21d5ca3061f el-1-reth-lighthouse engine-rpc: 8551/tcp -> 127.0.0.1:32803 RUNNING + metrics: 9001/tcp -> http://127.0.0.1:32804 + rpc: 8545/tcp -> 127.0.0.1:32801 + tcp-discovery: 30303/tcp -> 127.0.0.1:32805 + udp-discovery: 30303/udp -> 127.0.0.1:32774 + ws: 8546/tcp -> 127.0.0.1:32802 +e234b3b4a440 el-2-reth-teku engine-rpc: 8551/tcp -> 127.0.0.1:32808 RUNNING + metrics: 9001/tcp -> http://127.0.0.1:32809 + rpc: 8545/tcp -> 127.0.0.1:32806 + tcp-discovery: 30303/tcp -> 127.0.0.1:32810 + udp-discovery: 30303/udp -> 127.0.0.1:32775 + ws: 8546/tcp -> 127.0.0.1:32807 +92dd5a0599dc validator-key-generation-cl-validator-keystore RUNNING +f0a7d5343346 vc-1-reth-lighthouse metrics: 8080/tcp -> http://127.0.0.1:32817 RUNNING ``` -Great! You now have a private network with 2 full Ethereum nodes on your local machine over Docker - one that is a Reth/Lighthouse pair and another that is Reth/Teku. Check out the [Kurtosis docs](https://docs.kurtosis.com/cli) to learn about the various ways you can interact with and inspect your network. +Great! You now have a private network with 2 full Ethereum nodes on your local machine over Docker - one that is a Reth/Lighthouse pair and another that is Reth/Teku. Check out the [Kurtosis docs](https://docs.kurtosis.com/cli) to learn about the various ways you can interact with and inspect your network. ## Using Kurtosis on Kubernetes Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker or Kubernetes, locally or on remote infrastructure. For use cases that require a larger scale, Kurtosis can be deployed on Kubernetes by following these docs [here](https://docs.kurtosis.com/k8s/). ## Running the network with additional services -The [`ethereum-package`](https://github.com/kurtosis-tech/ethereum-package) comes with many optional flags and arguments you can enable for your private network. 
Some include: +The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) comes with many optional flags and arguments you can enable for your private network. Some include: - A Grafana + Prometheus instance - A transaction spammer called [`tx-fuzz`](https://github.com/MariusVanDerWijden/tx-fuzz) - [A network metrics collector](https://github.com/dapplion/beacon-metrics-gazer) diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md index d9df4139bb10..19d57e6398c9 100644 --- a/book/run/sync-op-mainnet.md +++ b/book/run/sync-op-mainnet.md @@ -12,9 +12,14 @@ Importing OP mainnet Bedrock datadir requires exported data: ## Manual Export Steps -See . +The `op-geth` Bedrock datadir can be downloaded from . -Output from running the command to export state, can also be downloaded from . +To export the OVM chain from `op-geth`, clone the `testinprod-io/op-geth` repo and checkout +. Commands to export blocks, receipts and state dump can be +found in `op-geth/migrate.sh`. + +Output from running the command to export state, can also be downloaded from +. ## Manual Import Steps diff --git a/book/run/transactions.md b/book/run/transactions.md index 65aa979e238e..61327b57300a 100644 --- a/book/run/transactions.md +++ b/book/run/transactions.md @@ -1,6 +1,6 @@ # Transaction types -Over time, the Ethereum network has undergone various upgrades and improvements to enhance transaction efficiency, security, and user experience. Three significant transaction types that have evolved are: +Over time, the Ethereum network has undergone various upgrades and improvements to enhance transaction efficiency, security, and user experience. 
Four significant transaction types that have evolved are: - Legacy Transactions, - EIP-2930 Transactions, @@ -46,4 +46,4 @@ Alongside the legacy parameters & parameters from EIP-1559, the EIP-4844 transac - `max_fee_per_blob_gas`, The maximum total fee per gas the sender is willing to pay for blob gas in wei - `blob_versioned_hashes`, List of versioned blob hashes associated with the transaction's EIP-4844 data blobs. -The actual blob fee is deducted from the sender balance before transaction execution and burned, and is not refunded in case of transaction failure. \ No newline at end of file +The actual blob fee is deducted from the sender balance before transaction execution and burned, and is not refunded in case of transaction failure. diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index 68a7cc29ea85..7368b6631abb 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -109,3 +109,71 @@ pthread_mutex_lock.c:438: __pthread_mutex_lock_full: Assertion `e != ESRCH || !r If you are using Docker, a possible solution is to run all database-accessing containers with `--pid=host` flag. For more information, check out the `Containers` section in the [libmdbx README](https://github.com/erthink/libmdbx#containers). + +## Hardware Performance Testing + +If you're experiencing degraded performance, it may be related to hardware issues. Below are some tools and tests you can run to evaluate your hardware performance. + +If your hardware performance is significantly lower than these reference numbers, it may explain degraded node performance. Consider upgrading your hardware or investigating potential issues with your current setup. + +### Disk Speed Testing with [IOzone](https://linux.die.net/man/1/iozone) + +1. 
Test disk speed: + ```bash + iozone -e -t1 -i0 -i2 -r1k -s1g /tmp + ``` + Reference numbers (on Latitude c3.large.x86): + + ```console + Children see throughput for 1 initial writers = 907733.81 kB/sec + Parent sees throughput for 1 initial writers = 907239.68 kB/sec + Children see throughput for 1 rewriters = 1765222.62 kB/sec + Parent sees throughput for 1 rewriters = 1763433.35 kB/sec + Children see throughput for 1 random readers = 1557497.38 kB/sec + Parent sees throughput for 1 random readers = 1554846.58 kB/sec + Children see throughput for 1 random writers = 984428.69 kB/sec + Parent sees throughput for 1 random writers = 983476.67 kB/sec + ``` +2. Test disk speed with memory-mapped files: + ```bash + iozone -B -G -e -t1 -i0 -i2 -r1k -s1g /tmp + ``` + Reference numbers (on Latitude c3.large.x86): + + ```console + Children see throughput for 1 initial writers = 56471.06 kB/sec + Parent sees throughput for 1 initial writers = 56365.14 kB/sec + Children see throughput for 1 rewriters = 241650.69 kB/sec + Parent sees throughput for 1 rewriters = 239067.96 kB/sec + Children see throughput for 1 random readers = 6833161.00 kB/sec + Parent sees throughput for 1 random readers = 5597659.65 kB/sec + Children see throughput for 1 random writers = 220248.53 kB/sec + Parent sees throughput for 1 random writers = 219112.26 kB/sec + ``` + +### RAM Speed and Health Testing + +1. Check RAM speed with [lshw](https://linux.die.net/man/1/lshw): + ```bash + sudo lshw -short -C memory + ``` + Look for the frequency in the output. Reference output: + + ```console + H/W path Device Class Description + ================================================================ + /0/24/0 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns) + /0/24/1 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns) + ... + ``` + +2. 
Test RAM health with [memtester](https://linux.die.net/man/8/memtester): + ```bash + sudo memtester 10G + ``` + This will take a while. You can test with a smaller amount first: + + ```bash + sudo memtester 1G 1 + ``` + All checks should report "ok". diff --git a/clippy.toml b/clippy.toml index 7e606c3f1f9f..865dfc7c95a5 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,2 +1,2 @@ -msrv = "1.76" +msrv = "1.79" too-large-for-stack = 128 diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index 5bd75590b504..000000000000 --- a/codecov.yml +++ /dev/null @@ -1,68 +0,0 @@ -coverage: - status: - patch: off - project: - default: - threshold: null - informational: true -github_checks: - annotations: false -comment: - layout: "reach, files, flags, components" - require_changes: true -component_management: - individual_components: - - component_id: reth_binary - name: reth binary - paths: - - bin/** - - crates/config/** - - crates/metrics/** - - crates/tracing/** - - component_id: blockchain_tree - name: blockchain tree - paths: - - crates/blockchain-tree/** - - component_id: staged_sync - name: pipeline - paths: - - crates/stages/** - - component_id: storage - name: storage (db) - paths: - - crates/storage/** - - component_id: trie - name: trie - paths: - - crates/trie/** - - component_id: txpool - name: txpool - paths: - - crates/transaction-pool/** - - component_id: networking - name: networking - paths: - - crates/net/** - - component_id: rpc - name: rpc - paths: - - crates/rpc/** - - component_id: consensus - name: consensus - paths: - - crates/consensus/** - - component_id: revm - name: revm - paths: - - crates/revm/** - - component_id: builder - name: payload builder - paths: - - crates/payload/** - - component_id: primitives - name: primitives - paths: - - crates/primitives/** - - crates/tasks/** - - crates/rlp/** - - crates/interfaces/** \ No newline at end of file diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 
64796939612d..b3679677a13c 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -21,6 +21,7 @@ reth-db-api.workspace = true reth-evm.workspace = true reth-revm.workspace = true reth-provider.workspace = true +reth-execution-types.workspace = true reth-prune-types.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } @@ -39,9 +40,10 @@ metrics.workspace = true # misc aquamarine.workspace = true -linked_hash_set = "0.1.4" +linked_hash_set.workspace = true [dev-dependencies] +reth-chainspec.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } @@ -52,6 +54,7 @@ reth-revm.workspace = true reth-evm-ethereum.workspace = true parking_lot.workspace = true assert_matches.workspace = true +alloy-genesis.workspace = true [features] test-utils = [] diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 420912b4088c..41c71a4c475c 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -3,8 +3,8 @@ use super::state::BlockchainId; use crate::canonical_chain::CanonicalChain; use linked_hash_set::LinkedHashSet; +use reth_execution_types::Chain; use reth_primitives::{BlockHash, BlockNumHash, BlockNumber, SealedBlockWithSenders}; -use reth_provider::Chain; use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}; /// Internal indices of the blocks and chains. 
diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 84e7971e06c5..73bdbf2906ca 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -13,15 +13,15 @@ use reth_consensus::{Consensus, ConsensusError}; use reth_db_api::database::Database; use reth_evm::execute::BlockExecutorProvider; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, + BlockHash, BlockNumHash, BlockNumber, EthereumHardfork, ForkBlock, GotExpected, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, }; use reth_provider::{ BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, - CanonStateNotificationSender, CanonStateNotifications, Chain, ChainSpecProvider, ChainSplit, - ChainSplitTarget, DisplayBlocksChain, ExecutionOutcome, HeaderProvider, ProviderError, - StaticFileProviderFactory, + CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, + ChainSplitTarget, DisplayBlocksChain, HeaderProvider, ProviderError, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use reth_stages_api::{MetricEvent, MetricEventsSender}; @@ -65,8 +65,6 @@ pub struct BlockchainTree { externals: TreeExternals, /// Tree configuration config: BlockchainTreeConfig, - /// Prune modes. - prune_modes: Option, /// Broadcast channel for canon state changes notifications. canon_state_notification_sender: CanonStateNotificationSender, /// Metrics for sync stages. @@ -115,9 +113,9 @@ where /// storage space efficiently. It's important to validate this configuration to ensure it does /// not lead to unintended data loss. 
pub fn new( - externals: TreeExternals, + mut externals: TreeExternals, config: BlockchainTreeConfig, - prune_modes: Option, + prune_modes: PruneModes, ) -> ProviderResult { let max_reorg_depth = config.max_reorg_depth() as usize; // The size of the broadcast is twice the maximum reorg depth, because at maximum reorg @@ -125,6 +123,9 @@ where let (canon_state_notification_sender, _receiver) = tokio::sync::broadcast::channel(max_reorg_depth * 2); + // Set the prune modes argument, on the provider + externals.provider_factory = externals.provider_factory.with_prune_modes(prune_modes); + let last_canonical_hashes = externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?; @@ -138,7 +139,6 @@ where config.max_unconnected_blocks(), ), config, - prune_modes, canon_state_notification_sender, sync_metrics_tx: None, metrics: Default::default(), @@ -402,7 +402,7 @@ where .externals .provider_factory .chain_spec() - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(parent_td, U256::ZERO) { return Err(BlockExecutionError::Validation(BlockValidationError::BlockPreMerge { @@ -1043,7 +1043,7 @@ where .externals .provider_factory .chain_spec() - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(td, U256::ZERO) { return Err(CanonicalError::from(BlockValidationError::BlockPreMerge { @@ -1063,9 +1063,7 @@ where }; // we are splitting chain at the block hash that we want to make canonical - let Some(canonical) = - self.remove_and_split_chain(chain_id, ChainSplitTarget::Hash(block_hash)) - else { + let Some(canonical) = self.remove_and_split_chain(chain_id, block_hash.into()) else { debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present"); return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into(), @@ -1200,7 +1198,7 @@ where } }); - durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChilds); + 
durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChildren); // Send notification about new canonical chain and return outcome of canonicalization. let outcome = CanonicalOutcome::Committed { head: chain_notification.tip().header.clone() }; @@ -1260,7 +1258,6 @@ where state, hashed_state, trie_updates, - self.prune_modes.as_ref(), ) .map_err(|e| CanonicalError::CanonicalCommit(e.to_string()))?; @@ -1369,8 +1366,10 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_genesis::{Genesis, GenesisAccount}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_consensus::test_utils::TestConsensus; use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; use reth_db_api::transaction::DbTxMut; @@ -1383,18 +1382,17 @@ mod tests { use reth_primitives::{ constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH, ETHEREUM_BLOCK_GAS_LIMIT}, keccak256, - proofs::{calculate_transaction_root, state_root_unhashed}, + proofs::calculate_transaction_root, revm_primitives::AccountInfo, - Account, Address, ChainSpecBuilder, Genesis, GenesisAccount, Header, Signature, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256, - MAINNET, + Account, Address, Header, Signature, Transaction, TransactionSigned, + TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256, }; use reth_provider::{ test_utils::{blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec}, ProviderFactory, }; use reth_stages_api::StageCheckpoint; - use reth_trie::StateRoot; + use reth_trie::{root::state_root_unhashed, StateRoot}; use std::collections::HashMap; fn setup_externals( @@ -1425,7 +1423,6 @@ mod tests { provider .insert_historical_block( genesis.try_seal_with_senders().expect("invalid tx signature in genesis"), - None, ) .unwrap(); @@ -1546,7 +1543,6 @@ mod tests { SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) 
.try_seal_with_senders() .unwrap(), - None, ) .unwrap(); let account = Account { balance: initial_signer_balance, ..Default::default() }; @@ -1648,7 +1644,7 @@ mod tests { let mut tree = BlockchainTree::new( TreeExternals::new(provider_factory, consensus, executor_provider), BlockchainTreeConfig::default(), - None, + PruneModes::default(), ) .expect("failed to create tree"); @@ -1728,7 +1724,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -1804,7 +1801,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -1868,7 +1866,7 @@ mod tests { ); let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = exec5.hash_state_slow().construct_prefix_sets(); + let prefix_sets = exec5.hash_state_slow().construct_prefix_sets().freeze(); let state_root = StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); assert_eq!(state_root, block5.state_root); @@ -1889,7 +1887,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -1987,7 +1986,8 @@ mod tests { // make tree 
let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); let mut canon_notif = tree.subscribe_canon_state(); // genesis block 10 is already canonical @@ -2380,7 +2380,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); assert_eq!( tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), @@ -2400,8 +2401,8 @@ mod tests { tree.make_canonical(block2.hash()).unwrap(); // restart - let mut tree = - BlockchainTree::new(cloned_externals_1, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(cloned_externals_1, config, PruneModes::default()) + .expect("failed to create tree"); assert_eq!(tree.block_indices().last_finalized_block(), 0); let mut block1a = block1; @@ -2417,8 +2418,8 @@ mod tests { tree.finalize_block(block1a.number).unwrap(); // restart - let tree = - BlockchainTree::new(cloned_externals_2, config, None).expect("failed to create tree"); + let tree = BlockchainTree::new(cloned_externals_2, config, PruneModes::default()) + .expect("failed to create tree"); assert_eq!(tree.block_indices().last_finalized_block(), block1a.number); } diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs index ab91ee5476a1..8a9893a1807f 100644 --- a/crates/blockchain-tree/src/canonical_chain.rs +++ b/crates/blockchain-tree/src/canonical_chain.rs @@ -12,7 +12,7 @@ pub(crate) struct CanonicalChain { } impl CanonicalChain { - pub(crate) fn new(chain: BTreeMap) -> Self { + pub(crate) const fn new(chain: BTreeMap) -> Self { Self { chain } } diff 
--git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index d53009b76609..1e6be8353497 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -13,12 +13,13 @@ use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_db_api::database::Database; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; +use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ BlockHash, BlockNumber, ForkBlock, GotExpected, SealedBlockWithSenders, SealedHeader, U256, }; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView}, - Chain, ExecutionOutcome, FullExecutionDataProvider, ProviderError, StateRootProvider, + FullExecutionDataProvider, ProviderError, StateRootProvider, }; use reth_revm::database::StateProviderDatabase; use reth_trie::updates::TrieUpdates; diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs index 735f1db96f1c..5d44a6391178 100644 --- a/crates/blockchain-tree/src/metrics.rs +++ b/crates/blockchain-tree/src/metrics.rs @@ -89,7 +89,7 @@ pub(crate) enum MakeCanonicalAction { /// Inserting an old canonical chain. InsertOldCanonicalChain, /// Clearing trie updates of other children chains after fork choice update. - ClearTrieUpdatesForOtherChilds, + ClearTrieUpdatesForOtherChildren, } /// Canonicalization metrics @@ -118,7 +118,7 @@ struct MakeCanonicalMetrics { insert_old_canonical_chain: Histogram, /// Duration of the clear trie updates of other children chains after fork choice update /// action. 
- clear_trie_updates_for_other_childs: Histogram, + clear_trie_updates_for_other_children: Histogram, } impl MakeCanonicalMetrics { @@ -145,8 +145,8 @@ impl MakeCanonicalMetrics { MakeCanonicalAction::InsertOldCanonicalChain => { self.insert_old_canonical_chain.record(duration) } - MakeCanonicalAction::ClearTrieUpdatesForOtherChilds => { - self.clear_trie_updates_for_other_childs.record(duration) + MakeCanonicalAction::ClearTrieUpdatesForOtherChildren => { + self.clear_trie_updates_for_other_children.record(duration) } } } diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 3ff14ca04f06..d92131dc8ac2 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -27,7 +27,7 @@ pub struct NoopBlockchainTree { impl NoopBlockchainTree { /// Create a new `NoopBlockchainTree` with a canon state notification sender. - pub fn with_canon_state_notifications( + pub const fn with_canon_state_notifications( canon_state_notification_sender: CanonStateNotificationSender, ) -> Self { Self { canon_state_notification_sender: Some(canon_state_notification_sender) } diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml new file mode 100644 index 000000000000..e4574acdace9 --- /dev/null +++ b/crates/chainspec/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "reth-chainspec" +version.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-ethereum-forks.workspace = true +reth-network-peers.workspace = true +reth-trie-common.workspace = true +reth-primitives-traits.workspace = true + +# ethereum +alloy-chains = { workspace = true, features = ["serde", "rlp"] } +alloy-eips = { workspace = true, features = ["serde"] } +alloy-genesis.workspace = true +alloy-primitives = { workspace = true, features = ["rand", "rlp"] } +alloy-trie.workspace = true + 
+# op +op-alloy-rpc-types = { workspace = true, optional = true } + + +# misc +once_cell.workspace = true +serde = { workspace = true, optional = true } +serde_json.workspace = true +derive_more.workspace = true + +[dev-dependencies] +# eth +nybbles = { workspace = true, features = ["arbitrary"] } +alloy-trie = { workspace = true, features = ["arbitrary"] } +alloy-eips = { workspace = true, features = ["arbitrary"] } +alloy-rlp = { workspace = true, features = ["arrayvec"] } +alloy-genesis.workspace = true +reth-rpc-types.workspace = true +rand.workspace = true + +# op +op-alloy-rpc-types.workspace = true + +[features] +default = ["std"] +optimism = [ + "reth-ethereum-forks/optimism", + "serde", + "dep:op-alloy-rpc-types", +] +std = [] +arbitrary = [ + "alloy-chains/arbitrary" +] + + diff --git a/crates/primitives/res/genesis/base.json b/crates/chainspec/res/genesis/base.json similarity index 100% rename from crates/primitives/res/genesis/base.json rename to crates/chainspec/res/genesis/base.json diff --git a/crates/primitives/res/genesis/dev.json b/crates/chainspec/res/genesis/dev.json similarity index 100% rename from crates/primitives/res/genesis/dev.json rename to crates/chainspec/res/genesis/dev.json diff --git a/crates/primitives/res/genesis/goerli.json b/crates/chainspec/res/genesis/goerli.json similarity index 100% rename from crates/primitives/res/genesis/goerli.json rename to crates/chainspec/res/genesis/goerli.json diff --git a/crates/primitives/res/genesis/holesky.json b/crates/chainspec/res/genesis/holesky.json similarity index 100% rename from crates/primitives/res/genesis/holesky.json rename to crates/chainspec/res/genesis/holesky.json diff --git a/crates/primitives/res/genesis/mainnet.json b/crates/chainspec/res/genesis/mainnet.json similarity index 100% rename from crates/primitives/res/genesis/mainnet.json rename to crates/chainspec/res/genesis/mainnet.json diff --git a/crates/primitives/res/genesis/optimism.json 
b/crates/chainspec/res/genesis/optimism.json similarity index 100% rename from crates/primitives/res/genesis/optimism.json rename to crates/chainspec/res/genesis/optimism.json diff --git a/crates/primitives/res/genesis/sepolia.json b/crates/chainspec/res/genesis/sepolia.json similarity index 100% rename from crates/primitives/res/genesis/sepolia.json rename to crates/chainspec/res/genesis/sepolia.json diff --git a/crates/primitives/res/genesis/sepolia_base.json b/crates/chainspec/res/genesis/sepolia_base.json similarity index 100% rename from crates/primitives/res/genesis/sepolia_base.json rename to crates/chainspec/res/genesis/sepolia_base.json diff --git a/crates/primitives/res/genesis/sepolia_op.json b/crates/chainspec/res/genesis/sepolia_op.json similarity index 100% rename from crates/primitives/res/genesis/sepolia_op.json rename to crates/chainspec/res/genesis/sepolia_op.json diff --git a/crates/primitives/res/genesis/weave_wm_testnet_v0.json b/crates/chainspec/res/genesis/weave_wm_testnet_v0.json similarity index 100% rename from crates/primitives/res/genesis/weave_wm_testnet_v0.json rename to crates/chainspec/res/genesis/weave_wm_testnet_v0.json diff --git a/crates/chainspec/src/constants/mod.rs b/crates/chainspec/src/constants/mod.rs new file mode 100644 index 000000000000..cde927189c8b --- /dev/null +++ b/crates/chainspec/src/constants/mod.rs @@ -0,0 +1,12 @@ +use crate::spec::DepositContract; +use alloy_primitives::{address, b256}; + +/// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa` +pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new( + address!("00000000219ab540356cbb839cbe05303d7705fa"), + 11052984, + b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), +); + +#[cfg(feature = "optimism")] +pub(crate) mod optimism; diff --git a/crates/chainspec/src/constants/optimism.rs b/crates/chainspec/src/constants/optimism.rs new file mode 100644 index 000000000000..1c32df6f37ed --- 
/dev/null +++ b/crates/chainspec/src/constants/optimism.rs @@ -0,0 +1,46 @@ +use alloy_eips::eip1559::BaseFeeParams; +use reth_primitives_traits::constants::{ + BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, + OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, + OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, + OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, + OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, + OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, + OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Base Sepolia. +pub(crate) const BASE_SEPOLIA_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, + elasticity_multiplier: BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Base Sepolia (post Canyon). +pub(crate) const BASE_SEPOLIA_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, + elasticity_multiplier: BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Optimism Sepolia. +pub(crate) const OP_SEPOLIA_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, + elasticity_multiplier: OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Optimism Sepolia (post Canyon). +pub(crate) const OP_SEPOLIA_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, + elasticity_multiplier: OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Optimism Mainnet. 
+pub(crate) const OP_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, + elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; + +/// Get the base fee parameters for Optimism Mainnet (post Canyon). +pub(crate) const OP_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { + max_change_denominator: OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, + elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, +}; diff --git a/crates/primitives/src/chain/info.rs b/crates/chainspec/src/info.rs similarity index 86% rename from crates/primitives/src/chain/info.rs rename to crates/chainspec/src/info.rs index 38b73e2768ae..6fe82d0a249b 100644 --- a/crates/primitives/src/chain/info.rs +++ b/crates/chainspec/src/info.rs @@ -1,4 +1,5 @@ -use crate::{BlockNumHash, BlockNumber, B256}; +use alloy_eips::BlockNumHash; +use alloy_primitives::{BlockNumber, B256}; /// Current status of the blockchain's head. #[derive(Default, Copy, Clone, Debug, Eq, PartialEq)] diff --git a/crates/primitives/src/chain/mod.rs b/crates/chainspec/src/lib.rs similarity index 70% rename from crates/primitives/src/chain/mod.rs rename to crates/chainspec/src/lib.rs index 727abe038112..17f766f5b0fd 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/chainspec/src/lib.rs @@ -1,26 +1,44 @@ +//! 
The spec of an Ethereum network + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + pub use alloy_chains::{Chain, ChainKind, NamedChain}; pub use info::ChainInfo; pub use spec::{ - AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, - DepositContract, DisplayHardforks, ForkBaseFeeParams, ForkCondition, DEV, GOERLI, HOLESKY, - MAINNET, SEPOLIA, + BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, + ForkBaseFeeParams, DEV, HOLESKY, MAINNET, SEPOLIA, }; #[cfg(feature = "optimism")] pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; -#[cfg(feature = "optimism")] -#[cfg(test)] -pub(crate) use spec::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS}; +#[cfg(not(feature = "std"))] +extern crate alloc; -// The chain spec module. -mod spec; -// The chain info module. +// /// The config info module namely spec id. +// pub mod config; +/// The chain info module. mod info; +/// The chain spec module. 
+mod spec; + +/// Chain specific constants +pub(crate) mod constants; + +/// Re-export for convenience +pub use reth_ethereum_forks::*; + #[cfg(test)] mod tests { use super::*; - use crate::U256; + use alloy_primitives::U256; use alloy_rlp::Encodable; use std::str::FromStr; @@ -32,8 +50,8 @@ mod tests { #[test] fn test_named_id() { - let chain = Chain::from_named(NamedChain::Goerli); - assert_eq!(chain.id(), 5); + let chain = Chain::from_named(NamedChain::Holesky); + assert_eq!(chain.id(), 17000); } #[test] @@ -59,9 +77,9 @@ mod tests { #[test] fn test_into_u256() { - let chain = Chain::from_named(NamedChain::Goerli); + let chain = Chain::from_named(NamedChain::Holesky); let n: U256 = U256::from(chain.id()); - let expected = U256::from(5); + let expected = U256::from(17000); assert_eq!(n, expected); } diff --git a/crates/primitives/src/chain/spec.rs b/crates/chainspec/src/spec.rs similarity index 66% rename from crates/primitives/src/chain/spec.rs rename to crates/chainspec/src/spec.rs index abdd1775e287..38521cf93a2b 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -1,74 +1,54 @@ -use crate::{ +use crate::constants::MAINNET_DEPOSIT_CONTRACT; +#[cfg(not(feature = "std"))] +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloy_chains::{Chain, ChainKind, NamedChain}; +use alloy_genesis::Genesis; +use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; +use alloy_trie::EMPTY_ROOT_HASH; +use derive_more::From; +use once_cell::sync::Lazy; +use reth_ethereum_forks::{ + ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, + ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Head, DEV_HARDFORKS, +}; +use reth_network_peers::NodeRecord; +use reth_primitives_traits::{ constants::{ - EIP1559_INITIAL_BASE_FEE, EMPTY_RECEIPTS, EMPTY_ROOT_HASH, EMPTY_TRANSACTIONS, - EMPTY_WITHDRAWALS, + DEV_GENESIS_HASH, EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, HOLESKY_GENESIS_HASH, + 
MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, }, - holesky_nodes, - net::{goerli_nodes, mainnet_nodes, sepolia_nodes}, - proofs::state_root_ref_unhashed, - revm_primitives::{address, b256}, - Address, BlockNumber, Chain, ChainKind, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis, - Hardfork, Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, - MAINNET_DEPOSIT_CONTRACT, U256, -}; -use once_cell::sync::Lazy; -use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - fmt::{Display, Formatter}, - sync::Arc, + Header, SealedHeader, }; +use reth_trie_common::root::state_root_ref_unhashed; +#[cfg(feature = "std")] +use std::sync::Arc; +#[cfg(feature = "optimism")] +use crate::constants::optimism::{ + BASE_SEPOLIA_BASE_FEE_PARAMS, BASE_SEPOLIA_CANYON_BASE_FEE_PARAMS, OP_BASE_FEE_PARAMS, + OP_CANYON_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS, +}; pub use alloy_eips::eip1559::BaseFeeParams; - #[cfg(feature = "optimism")] -pub(crate) use crate::{ - constants::{ - OP_BASE_FEE_PARAMS, OP_CANYON_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS, - OP_SEPOLIA_CANYON_BASE_FEE_PARAMS, - }, - net::{base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes}, +use reth_ethereum_forks::OptimismHardfork; +use reth_network_peers::{ + base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, + sepolia_nodes, }; /// The Ethereum mainnet spec pub static MAINNET: Lazy> = Lazy::new(|| { ChainSpec { chain: Chain::mainnet(), - genesis: serde_json::from_str(include_str!("../../res/genesis/mainnet.json")) + genesis: serde_json::from_str(include_str!("../res/genesis/mainnet.json")) .expect("Can't deserialize Mainnet genesis json"), - genesis_hash: Some(b256!( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" - )), + genesis_hash: Some(MAINNET_GENESIS_HASH), // paris_block_and_final_difficulty: Some(( 15537394, U256::from(58_750_003_716_598_352_816_469u128), )), - 
hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(1150000)), - (Hardfork::Dao, ForkCondition::Block(1920000)), - (Hardfork::Tangerine, ForkCondition::Block(2463000)), - (Hardfork::SpuriousDragon, ForkCondition::Block(2675000)), - (Hardfork::Byzantium, ForkCondition::Block(4370000)), - (Hardfork::Constantinople, ForkCondition::Block(7280000)), - (Hardfork::Petersburg, ForkCondition::Block(7280000)), - (Hardfork::Istanbul, ForkCondition::Block(9069000)), - (Hardfork::MuirGlacier, ForkCondition::Block(9200000)), - (Hardfork::Berlin, ForkCondition::Block(12244000)), - (Hardfork::London, ForkCondition::Block(12965000)), - (Hardfork::ArrowGlacier, ForkCondition::Block(13773000)), - (Hardfork::GrayGlacier, ForkCondition::Block(15050000)), - ( - Hardfork::Paris, - ForkCondition::TTD { - fork_block: None, - total_difficulty: U256::from(58_750_000_000_000_000_000_000_u128), - }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(1681338455)), - (Hardfork::Cancun, ForkCondition::Timestamp(1710338135)), - ]), + hardforks: EthereumHardfork::mainnet().into(), // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 deposit_contract: Some(DepositContract::new( address!("00000000219ab540356cbb839cbe05303d7705fa"), @@ -81,82 +61,16 @@ pub static MAINNET: Lazy> = Lazy::new(|| { .into() }); -/// The Goerli spec -pub static GOERLI: Lazy> = Lazy::new(|| { - ChainSpec { - chain: Chain::goerli(), - genesis: serde_json::from_str(include_str!("../../res/genesis/goerli.json")) - .expect("Can't deserialize Goerli genesis json"), - genesis_hash: Some(b256!( - "bf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a" - )), - // - paris_block_and_final_difficulty: Some((7382818, U256::from(10_790_000))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Dao, ForkCondition::Block(0)), - 
(Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(1561651)), - (Hardfork::Berlin, ForkCondition::Block(4460644)), - (Hardfork::London, ForkCondition::Block(5062605)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: None, total_difficulty: U256::from(10_790_000) }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(1678832736)), - (Hardfork::Cancun, ForkCondition::Timestamp(1705473120)), - ]), - // https://goerli.etherscan.io/tx/0xa3c07dc59bfdb1bfc2d50920fed2ef2c1c4e0a09fe2325dbc14e07702f965a78 - deposit_contract: Some(DepositContract::new( - address!("ff50ed3d0ec03ac01d4c79aad74928bff48a7b2b"), - 4367322, - b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), - )), - base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), - prune_delete_limit: 1700, - } - .into() -}); - /// The Sepolia spec pub static SEPOLIA: Lazy> = Lazy::new(|| { ChainSpec { chain: Chain::sepolia(), - genesis: serde_json::from_str(include_str!("../../res/genesis/sepolia.json")) + genesis: serde_json::from_str(include_str!("../res/genesis/sepolia.json")) .expect("Can't deserialize Sepolia genesis json"), - genesis_hash: Some(b256!( - "25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9" - )), + genesis_hash: Some(SEPOLIA_GENESIS_HASH), // paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Dao, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, 
ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { - fork_block: Some(1735371), - total_difficulty: U256::from(17_000_000_000_000_000u64), - }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(1677557088)), - (Hardfork::Cancun, ForkCondition::Timestamp(1706655072)), - ]), + hardforks: EthereumHardfork::sepolia().into(), // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14 deposit_contract: Some(DepositContract::new( address!("7f02c3e3c98b133055b8b348b2ac625669ed295d"), @@ -173,32 +87,11 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { pub static HOLESKY: Lazy> = Lazy::new(|| { ChainSpec { chain: Chain::holesky(), - genesis: serde_json::from_str(include_str!("../../res/genesis/holesky.json")) + genesis: serde_json::from_str(include_str!("../res/genesis/holesky.json")) .expect("Can't deserialize Holesky genesis json"), - genesis_hash: Some(b256!( - "b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4" - )), + genesis_hash: Some(HOLESKY_GENESIS_HASH), paris_block_and_final_difficulty: Some((0, U256::from(1))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Dao, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - ( - 
Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(1696000704)), - (Hardfork::Cancun, ForkCondition::Timestamp(1707305664)), - ]), + hardforks: EthereumHardfork::holesky().into(), deposit_contract: Some(DepositContract::new( address!("4242424242424242424242424242424242424242"), 0, @@ -217,38 +110,11 @@ pub static HOLESKY: Lazy> = Lazy::new(|| { pub static DEV: Lazy> = Lazy::new(|| { ChainSpec { chain: Chain::dev(), - genesis: serde_json::from_str(include_str!("../../res/genesis/dev.json")) + genesis: serde_json::from_str(include_str!("../res/genesis/dev.json")) .expect("Can't deserialize Dev testnet genesis json"), - genesis_hash: Some(b256!( - "2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c" - )), + genesis_hash: Some(DEV_GENESIS_HASH), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Dao, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(0)), - (Hardfork::Cancun, ForkCondition::Timestamp(0)), - #[cfg(feature = "optimism")] - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - #[cfg(feature = "optimism")] - (Hardfork::Bedrock, ForkCondition::Block(0)), - #[cfg(feature = "optimism")] - (Hardfork::Ecotone, 
ForkCondition::Timestamp(0)), - ]), + hardforks: DEV_HARDFORKS.clone(), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), deposit_contract: None, // TODO: do we even have? ..Default::default() @@ -263,42 +129,17 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { chain: Chain::optimism_mainnet(), // genesis contains empty alloc field because state at first bedrock block is imported // manually from trusted source - genesis: serde_json::from_str(include_str!("../../res/genesis/optimism.json")) + genesis: serde_json::from_str(include_str!("../res/genesis/optimism.json")) .expect("Can't deserialize Optimism Mainnet genesis json"), genesis_hash: Some(b256!( "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(3950000)), - (Hardfork::London, ForkCondition::Block(105235063)), - (Hardfork::ArrowGlacier, ForkCondition::Block(105235063)), - (Hardfork::GrayGlacier, ForkCondition::Block(105235063)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(105235063), total_difficulty: U256::from(0) }, - ), - (Hardfork::Bedrock, ForkCondition::Block(105235063)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)), - (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), - (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)), 
- (Hardfork::Fjord, ForkCondition::Timestamp(1720627201)), - ]), + hardforks: OptimismHardfork::op_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, OP_BASE_FEE_PARAMS), - (Hardfork::Canyon, OP_CANYON_BASE_FEE_PARAMS), + (EthereumHardfork::London.boxed(), OP_BASE_FEE_PARAMS), + (OptimismHardfork::Canyon.boxed(), OP_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -313,42 +154,17 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { ChainSpec { chain: Chain::from_named(NamedChain::OptimismSepolia), - genesis: serde_json::from_str(include_str!("../../res/genesis/sepolia_op.json")) + genesis: serde_json::from_str(include_str!("../res/genesis/sepolia_op.json")) .expect("Can't deserialize OP Sepolia genesis json"), genesis_hash: Some(b256!( "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::ArrowGlacier, ForkCondition::Block(0)), - (Hardfork::GrayGlacier, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) }, - ), - (Hardfork::Bedrock, ForkCondition::Block(0)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1699981200)), - (Hardfork::Canyon, ForkCondition::Timestamp(1699981200)), - (Hardfork::Cancun, 
ForkCondition::Timestamp(1708534800)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1708534800)), - (Hardfork::Fjord, ForkCondition::Timestamp(1716998400)), - ]), + hardforks: OptimismHardfork::op_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, OP_SEPOLIA_BASE_FEE_PARAMS), - (Hardfork::Canyon, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS), + (EthereumHardfork::London.boxed(), OP_SEPOLIA_BASE_FEE_PARAMS), + (OptimismHardfork::Canyon.boxed(), OP_SEPOLIA_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -363,42 +179,17 @@ pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { pub static BASE_SEPOLIA: Lazy> = Lazy::new(|| { ChainSpec { chain: Chain::base_sepolia(), - genesis: serde_json::from_str(include_str!("../../res/genesis/sepolia_base.json")) + genesis: serde_json::from_str(include_str!("../res/genesis/sepolia_base.json")) .expect("Can't deserialize Base Sepolia genesis json"), genesis_hash: Some(b256!( "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::ArrowGlacier, ForkCondition::Block(0)), - (Hardfork::GrayGlacier, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) }, - ), - (Hardfork::Bedrock, ForkCondition::Block(0)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Shanghai, 
ForkCondition::Timestamp(1699981200)), - (Hardfork::Canyon, ForkCondition::Timestamp(1699981200)), - (Hardfork::Cancun, ForkCondition::Timestamp(1708534800)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1708534800)), - (Hardfork::Fjord, ForkCondition::Timestamp(1716998400)), - ]), + hardforks: OptimismHardfork::base_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, OP_SEPOLIA_BASE_FEE_PARAMS), - (Hardfork::Canyon, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS), + (EthereumHardfork::London.boxed(), BASE_SEPOLIA_BASE_FEE_PARAMS), + (OptimismHardfork::Canyon.boxed(), BASE_SEPOLIA_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -413,42 +204,17 @@ pub static BASE_SEPOLIA: Lazy> = Lazy::new(|| { pub static BASE_MAINNET: Lazy> = Lazy::new(|| { ChainSpec { chain: Chain::base_mainnet(), - genesis: serde_json::from_str(include_str!("../../res/genesis/base.json")) + genesis: serde_json::from_str(include_str!("../res/genesis/base.json")) .expect("Can't deserialize Base genesis json"), genesis_hash: Some(b256!( "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::ArrowGlacier, ForkCondition::Block(0)), - (Hardfork::GrayGlacier, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) }, - ), - (Hardfork::Bedrock, 
ForkCondition::Block(0)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)), - (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), - (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)), - (Hardfork::Fjord, ForkCondition::Timestamp(1720627201)), - ]), + hardforks: OptimismHardfork::base_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, OP_BASE_FEE_PARAMS), - (Hardfork::Canyon, OP_CANYON_BASE_FEE_PARAMS), + (EthereumHardfork::London.boxed(), OP_BASE_FEE_PARAMS), + (OptimismHardfork::Canyon.boxed(), OP_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -460,8 +226,7 @@ pub static BASE_MAINNET: Lazy> = Lazy::new(|| { /// A wrapper around [`BaseFeeParams`] that allows for specifying constant or dynamic EIP-1559 /// parameters based on the active [Hardfork]. -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] -#[serde(untagged)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum BaseFeeParamsKind { /// Constant [`BaseFeeParams`]; used for chains that don't have dynamic EIP-1559 parameters Constant(BaseFeeParams), @@ -470,6 +235,12 @@ pub enum BaseFeeParamsKind { Variable(ForkBaseFeeParams), } +impl Default for BaseFeeParamsKind { + fn default() -> Self { + BaseFeeParams::ethereum().into() + } +} + impl From for BaseFeeParamsKind { fn from(params: BaseFeeParams) -> Self { Self::Constant(params) @@ -484,12 +255,14 @@ impl From for BaseFeeParamsKind { /// A type alias to a vector of tuples of [Hardfork] and [`BaseFeeParams`], sorted by [Hardfork] /// activation order. This is used to specify dynamic EIP-1559 parameters for chains like Optimism. 
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] -pub struct ForkBaseFeeParams(Vec<(Hardfork, BaseFeeParams)>); +#[derive(Clone, Debug, PartialEq, Eq, From)] +pub struct ForkBaseFeeParams(Vec<(Box, BaseFeeParams)>); + +impl core::ops::Deref for ChainSpec { + type Target = ChainHardforks; -impl From> for ForkBaseFeeParams { - fn from(params: Vec<(Hardfork, BaseFeeParams)>) -> Self { - Self(params) + fn deref(&self) -> &Self::Target { + &self.hardforks } } @@ -500,7 +273,7 @@ impl From> for ForkBaseFeeParams { /// - Meta-information about the chain (the chain ID) /// - The genesis block of the chain ([`Genesis`]) /// - What hardforks are activated, and under which conditions -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct ChainSpec { /// The chain ID pub chain: Chain, @@ -509,22 +282,19 @@ pub struct ChainSpec { /// /// This acts as a small cache for known chains. If the chain is known, then the genesis hash /// is also known ahead of time, and this will be `Some`. - #[serde(skip, default)] pub genesis_hash: Option, /// The genesis block pub genesis: Genesis, - /// The block at which [`Hardfork::Paris`] was activated and the final difficulty at this - /// block. - #[serde(skip, default)] + /// The block at which [`EthereumHardfork::Paris`] was activated and the final difficulty at + /// this block. pub paris_block_and_final_difficulty: Option<(u64, U256)>, /// The active hard forks and their activation conditions - pub hardforks: BTreeMap, + pub hardforks: ChainHardforks, /// The deposit contract deployed for `PoS` - #[serde(skip, default)] pub deposit_contract: Option, /// The parameters that configure how a block's base fee is computed @@ -533,7 +303,6 @@ pub struct ChainSpec { /// The delete limit for pruner, per block. In the actual pruner run it will be multiplied by /// the amount of blocks between pruner runs to account for the difference in amount of new /// data coming in. 
- #[serde(default)] pub prune_delete_limit: usize, } @@ -580,7 +349,7 @@ impl ChainSpec { #[inline] #[cfg(feature = "optimism")] pub fn is_optimism(&self) -> bool { - self.chain.is_optimism() || self.hardforks.contains_key(&Hardfork::Bedrock) + self.chain.is_optimism() || self.hardforks.get(OptimismHardfork::Bedrock).is_some() } /// Returns `true` if this chain contains Optimism configuration. @@ -611,7 +380,7 @@ impl ChainSpec { // If shanghai is activated, initialize the header with an empty withdrawals hash, and // empty withdrawals list. let withdrawals_root = self - .fork(Hardfork::Shanghai) + .fork(EthereumHardfork::Shanghai) .active_at_timestamp(self.genesis.timestamp) .then_some(EMPTY_WITHDRAWALS); @@ -636,12 +405,6 @@ impl ChainSpec { }; Header { - parent_hash: B256::ZERO, - number: 0, - transactions_root: EMPTY_TRANSACTIONS, - ommers_hash: EMPTY_OMMER_ROOT_HASH, - receipts_root: EMPTY_RECEIPTS, - logs_bloom: Default::default(), gas_limit: self.genesis.gas_limit as u64, difficulty: self.genesis.difficulty, nonce: self.genesis.nonce, @@ -650,13 +413,13 @@ impl ChainSpec { timestamp: self.genesis.timestamp, mix_hash: self.genesis.mix_hash, beneficiary: self.genesis.coinbase, - gas_used: Default::default(), base_fee_per_gas, withdrawals_root, parent_beacon_block_root, blob_gas_used, excess_blob_gas, requests_root, + ..Default::default() } } @@ -672,7 +435,7 @@ impl ChainSpec { self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(EIP1559_INITIAL_BASE_FEE); // If London is activated at genesis, we set the initial base fee as per EIP-1559. - self.fork(Hardfork::London).active_at_block(0).then_some(genesis_base_fee) + self.hardforks.fork(EthereumHardfork::London).active_at_block(0).then_some(genesis_base_fee) } /// Get the [`BaseFeeParams`] for the chain at the given timestamp. @@ -684,8 +447,8 @@ impl ChainSpec { // first one that corresponds to a hardfork that is active at the // given timestamp. 
for (fork, params) in bf_params.iter().rev() { - if self.is_fork_active_at_timestamp(*fork, timestamp) { - return *params; + if self.hardforks.is_fork_active_at_timestamp(fork.clone(), timestamp) { + return *params } } @@ -703,8 +466,8 @@ impl ChainSpec { // first one that corresponds to a hardfork that is active at the // given timestamp. for (fork, params) in bf_params.iter().rev() { - if self.is_fork_active_at_block(*fork, block_number) { - return *params; + if self.hardforks.is_fork_active_at_block(fork.clone(), block_number) { + return *params } } @@ -740,123 +503,55 @@ impl ChainSpec { } /// Get the fork filter for the given hardfork - pub fn hardfork_fork_filter(&self, fork: Hardfork) -> Option { - match self.fork(fork) { + pub fn hardfork_fork_filter(&self, fork: H) -> Option { + match self.hardforks.fork(fork.clone()) { ForkCondition::Never => None, - _ => Some(self.fork_filter(self.satisfy(self.fork(fork)))), + _ => Some(self.fork_filter(self.satisfy(self.hardforks.fork(fork)))), } } - /// Returns the forks in this specification and their activation conditions. - pub const fn hardforks(&self) -> &BTreeMap { - &self.hardforks - } - /// Returns the hardfork display helper. pub fn display_hardforks(&self) -> DisplayHardforks { DisplayHardforks::new( - self.hardforks(), + &self.hardforks, self.paris_block_and_final_difficulty.map(|(block, _)| block), ) } /// Get the fork id for the given hardfork. #[inline] - pub fn hardfork_fork_id(&self, fork: Hardfork) -> Option { - match self.fork(fork) { + pub fn hardfork_fork_id(&self, fork: H) -> Option { + let condition = self.hardforks.fork(fork); + match condition { ForkCondition::Never => None, - _ => Some(self.fork_id(&self.satisfy(self.fork(fork)))), + _ => Some(self.fork_id(&self.satisfy(condition))), } } - /// Convenience method to get the fork id for [`Hardfork::Shanghai`] from a given chainspec. + /// Convenience method to get the fork id for [`EthereumHardfork::Shanghai`] from a given + /// chainspec. 
#[inline] pub fn shanghai_fork_id(&self) -> Option { - self.hardfork_fork_id(Hardfork::Shanghai) + self.hardfork_fork_id(EthereumHardfork::Shanghai) } - /// Convenience method to get the fork id for [`Hardfork::Cancun`] from a given chainspec. + /// Convenience method to get the fork id for [`EthereumHardfork::Cancun`] from a given + /// chainspec. #[inline] pub fn cancun_fork_id(&self) -> Option { - self.hardfork_fork_id(Hardfork::Cancun) + self.hardfork_fork_id(EthereumHardfork::Cancun) } /// Convenience method to get the latest fork id from the chainspec. Panics if chainspec has no /// hardforks. #[inline] pub fn latest_fork_id(&self) -> ForkId { - self.hardfork_fork_id(*self.hardforks().last_key_value().unwrap().0).unwrap() - } - - /// Get the fork condition for the given fork. - pub fn fork(&self, fork: Hardfork) -> ForkCondition { - self.hardforks.get(&fork).copied().unwrap_or(ForkCondition::Never) - } - - /// Get an iterator of all hardforks with their respective activation conditions. - pub fn forks_iter(&self) -> impl Iterator + '_ { - self.hardforks.iter().map(|(f, b)| (*f, *b)) - } - - /// Convenience method to check if a fork is active at a given timestamp. - #[inline] - pub fn is_fork_active_at_timestamp(&self, fork: Hardfork, timestamp: u64) -> bool { - self.fork(fork).active_at_timestamp(timestamp) - } - - /// Convenience method to check if a fork is active at a given block number - #[inline] - pub fn is_fork_active_at_block(&self, fork: Hardfork, block_number: u64) -> bool { - self.fork(fork).active_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Shanghai`] is active at a given timestamp. - #[inline] - pub fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp) - } - - /// Convenience method to check if [`Hardfork::Cancun`] is active at a given timestamp. 
- #[inline] - pub fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp) - } - - /// Convenience method to check if [`Hardfork::Prague`] is active at a given timestamp. - #[inline] - pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp) - } - - /// Convenience method to check if [`Hardfork::Byzantium`] is active at a given block number. - #[inline] - pub fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Byzantium).active_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::SpuriousDragon`] is active at a given block - /// number. - #[inline] - pub fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::SpuriousDragon).active_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Homestead`] is active at a given block number. - #[inline] - pub fn is_homestead_active_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Homestead).active_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Bedrock`] is active at a given block number. - #[cfg(feature = "optimism")] - #[inline] - pub fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Bedrock).active_at_block(block_number) + self.hardfork_fork_id(self.hardforks.last().unwrap().0).unwrap() } /// Creates a [`ForkFilter`] for the block described by [Head]. pub fn fork_filter(&self, head: Head) -> ForkFilter { - let forks = self.forks_iter().filter_map(|(_, condition)| { + let forks = self.hardforks.forks_iter().filter_map(|(_, condition)| { // We filter out TTD-based forks w/o a pre-known block since those do not show up in the // fork filter. 
Some(match condition { @@ -876,7 +571,7 @@ impl ChainSpec { let mut current_applied = 0; // handle all block forks before handling timestamp based forks. see: https://eips.ethereum.org/EIPS/eip-6122 - for (_, cond) in self.forks_iter() { + for (_, cond) in self.hardforks.forks_iter() { // handle block based forks and the sepolia merge netsplit block edge case (TTD // ForkCondition with Some(block)) if let ForkCondition::Block(block) | @@ -898,7 +593,7 @@ impl ChainSpec { // timestamp are ALWAYS applied after the merge. // // this filter ensures that no block-based forks are returned - for timestamp in self.forks_iter().filter_map(|(_, cond)| { + for timestamp in self.hardforks.forks_iter().filter_map(|(_, cond)| { cond.as_timestamp().filter(|time| time > &self.genesis.timestamp) }) { let cond = ForkCondition::Timestamp(timestamp); @@ -942,7 +637,7 @@ impl ChainSpec { /// /// Note: this returns None if the `ChainSpec` is not configured with a TTD/Timestamp fork. pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option { - let mut hardforks_iter = self.forks_iter().peekable(); + let mut hardforks_iter = self.hardforks.forks_iter().peekable(); while let Some((_, curr_cond)) = hardforks_iter.next() { if let Some((_, next_cond)) = hardforks_iter.peek() { // peek and find the first occurrence of ForkCondition::TTD (merge) , or in @@ -985,16 +680,11 @@ impl ChainSpec { let chain = self.chain; match chain.try_into().ok()? 
{ C::Mainnet => Some(mainnet_nodes()), - C::Goerli => Some(goerli_nodes()), C::Sepolia => Some(sepolia_nodes()), C::Holesky => Some(holesky_nodes()), - #[cfg(feature = "optimism")] C::Base => Some(base_nodes()), - #[cfg(feature = "optimism")] C::Optimism => Some(op_nodes()), - #[cfg(feature = "optimism")] C::BaseGoerli | C::BaseSepolia => Some(base_testnet_nodes()), - #[cfg(feature = "optimism")] C::OptimismSepolia | C::OptimismGoerli | C::OptimismKovan => Some(op_testnet_nodes()), _ => None, } @@ -1005,40 +695,43 @@ impl From for ChainSpec { fn from(genesis: Genesis) -> Self { #[cfg(feature = "optimism")] let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + #[cfg(feature = "optimism")] + let genesis_info = + optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); // Block-based hardforks let hardfork_opts = [ - (Hardfork::Homestead, genesis.config.homestead_block), - (Hardfork::Dao, genesis.config.dao_fork_block), - (Hardfork::Tangerine, genesis.config.eip150_block), - (Hardfork::SpuriousDragon, genesis.config.eip155_block), - (Hardfork::Byzantium, genesis.config.byzantium_block), - (Hardfork::Constantinople, genesis.config.constantinople_block), - (Hardfork::Petersburg, genesis.config.petersburg_block), - (Hardfork::Istanbul, genesis.config.istanbul_block), - (Hardfork::MuirGlacier, genesis.config.muir_glacier_block), - (Hardfork::Berlin, genesis.config.berlin_block), - (Hardfork::London, genesis.config.london_block), - (Hardfork::ArrowGlacier, genesis.config.arrow_glacier_block), - (Hardfork::GrayGlacier, genesis.config.gray_glacier_block), + (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), + (EthereumHardfork::Dao.boxed(), genesis.config.dao_fork_block), + (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), + (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), + (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), + 
(EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), + (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), + (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), + (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), + (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), + (EthereumHardfork::London.boxed(), genesis.config.london_block), + (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), + (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), #[cfg(feature = "optimism")] - (Hardfork::Bedrock, optimism_genesis_info.bedrock_block), + (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), ]; let mut hardforks = hardfork_opts - .iter() - .filter_map(|(hardfork, opt)| opt.map(|block| (*hardfork, ForkCondition::Block(block)))) - .collect::>(); + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) + .collect::>(); // Paris let paris_block_and_final_difficulty = if let Some(ttd) = genesis.config.terminal_total_difficulty { - hardforks.insert( - Hardfork::Paris, + hardforks.push(( + EthereumHardfork::Paris.boxed(), ForkCondition::TTD { total_difficulty: ttd, fork_block: genesis.config.merge_netsplit_block, }, - ); + )); genesis.config.merge_netsplit_block.map(|block| (block, ttd)) } else { @@ -1047,28 +740,45 @@ impl From for ChainSpec { // Time-based hardforks let time_hardfork_opts = [ - (Hardfork::Shanghai, genesis.config.shanghai_time), - (Hardfork::Cancun, genesis.config.cancun_time), - (Hardfork::Prague, genesis.config.prague_time), + (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), + (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), + (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), #[cfg(feature = "optimism")] - (Hardfork::Regolith, optimism_genesis_info.regolith_time), + (OptimismHardfork::Regolith.boxed(), 
genesis_info.regolith_time), #[cfg(feature = "optimism")] - (Hardfork::Canyon, optimism_genesis_info.canyon_time), + (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), #[cfg(feature = "optimism")] - (Hardfork::Ecotone, optimism_genesis_info.ecotone_time), + (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), #[cfg(feature = "optimism")] - (Hardfork::Fjord, optimism_genesis_info.fjord_time), + (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), ]; let time_hardforks = time_hardfork_opts - .iter() + .into_iter() .filter_map(|(hardfork, opt)| { - opt.map(|time| (*hardfork, ForkCondition::Timestamp(time))) + opt.map(|time| (hardfork, ForkCondition::Timestamp(time))) }) - .collect::>(); + .collect::>(); hardforks.extend(time_hardforks); + // Uses ethereum or optimism main chains to find proper order + #[cfg(not(feature = "optimism"))] + let mainnet_hardforks: ChainHardforks = EthereumHardfork::mainnet().into(); + #[cfg(not(feature = "optimism"))] + let mainnet_order = mainnet_hardforks.forks_iter(); + #[cfg(feature = "optimism")] + let mainnet_hardforks = OptimismHardfork::op_mainnet(); + #[cfg(feature = "optimism")] + let mainnet_order = mainnet_hardforks.forks_iter(); + + let mut ordered_hardforks = Vec::with_capacity(hardforks.len()); + for (hardfork, _) in mainnet_order { + if let Some(pos) = hardforks.iter().position(|(e, _)| **e == *hardfork) { + ordered_hardforks.push(hardforks[pos].clone()); + } + } + // NOTE: in full node, we prune all receipts except the deposit contract's. We do not // have the deployment block in the genesis file, so we use block zero. 
We use the same // deposit topic as the mainnet contract if we have the deposit contract address in the @@ -1081,7 +791,7 @@ impl From for ChainSpec { chain: genesis.config.chain_id.into(), genesis, genesis_hash: None, - hardforks, + hardforks: ChainHardforks::new(hardforks), paris_block_and_final_difficulty, deposit_contract, #[cfg(feature = "optimism")] @@ -1091,49 +801,12 @@ impl From for ChainSpec { } } -/// A helper type for compatibility with geth's config -#[derive(Debug, Clone, Deserialize, Serialize)] -#[serde(untagged)] -pub enum AllGenesisFormats { - /// The reth genesis format - Reth(ChainSpec), - /// The geth genesis format - Geth(Genesis), -} - -impl From for AllGenesisFormats { - fn from(genesis: Genesis) -> Self { - Self::Geth(genesis) - } -} - -impl From for AllGenesisFormats { - fn from(genesis: ChainSpec) -> Self { - Self::Reth(genesis) - } -} - -impl From> for AllGenesisFormats { - fn from(genesis: Arc) -> Self { - Arc::try_unwrap(genesis).unwrap_or_else(|arc| (*arc).clone()).into() - } -} - -impl From for ChainSpec { - fn from(genesis: AllGenesisFormats) -> Self { - match genesis { - AllGenesisFormats::Geth(genesis) => genesis.into(), - AllGenesisFormats::Reth(genesis) => genesis, - } - } -} - /// A helper to build custom chain specs #[derive(Debug, Default, Clone)] pub struct ChainSpecBuilder { chain: Option, genesis: Option, - hardforks: BTreeMap, + hardforks: ChainHardforks, } impl ChainSpecBuilder { @@ -1145,7 +818,9 @@ impl ChainSpecBuilder { hardforks: MAINNET.hardforks.clone(), } } +} +impl ChainSpecBuilder { /// Set the chain ID pub const fn chain(mut self, chain: Chain) -> Self { self.chain = Some(chain); @@ -1159,14 +834,14 @@ impl ChainSpecBuilder { } /// Add the given fork with the given activation condition to the spec. 
- pub fn with_fork(mut self, fork: Hardfork, condition: ForkCondition) -> Self { + pub fn with_fork(mut self, fork: EthereumHardfork, condition: ForkCondition) -> Self { self.hardforks.insert(fork, condition); self } /// Remove the given fork from the spec. - pub fn without_fork(mut self, fork: Hardfork) -> Self { - self.hardforks.remove(&fork); + pub fn without_fork(mut self, fork: EthereumHardfork) -> Self { + self.hardforks.remove(fork); self } @@ -1175,77 +850,77 @@ impl ChainSpecBuilder { /// Does not set the merge netsplit block. pub fn paris_at_ttd(self, ttd: U256) -> Self { self.with_fork( - Hardfork::Paris, + EthereumHardfork::Paris, ForkCondition::TTD { total_difficulty: ttd, fork_block: None }, ) } /// Enable Frontier at genesis. pub fn frontier_activated(mut self) -> Self { - self.hardforks.insert(Hardfork::Frontier, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Frontier, ForkCondition::Block(0)); self } /// Enable Homestead at genesis. pub fn homestead_activated(mut self) -> Self { self = self.frontier_activated(); - self.hardforks.insert(Hardfork::Homestead, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Homestead, ForkCondition::Block(0)); self } /// Enable Tangerine at genesis. pub fn tangerine_whistle_activated(mut self) -> Self { self = self.homestead_activated(); - self.hardforks.insert(Hardfork::Tangerine, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Tangerine, ForkCondition::Block(0)); self } /// Enable Spurious Dragon at genesis. pub fn spurious_dragon_activated(mut self) -> Self { self = self.tangerine_whistle_activated(); - self.hardforks.insert(Hardfork::SpuriousDragon, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::SpuriousDragon, ForkCondition::Block(0)); self } /// Enable Byzantium at genesis. 
pub fn byzantium_activated(mut self) -> Self { self = self.spurious_dragon_activated(); - self.hardforks.insert(Hardfork::Byzantium, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Byzantium, ForkCondition::Block(0)); self } /// Enable Constantinople at genesis. pub fn constantinople_activated(mut self) -> Self { self = self.byzantium_activated(); - self.hardforks.insert(Hardfork::Constantinople, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Constantinople, ForkCondition::Block(0)); self } /// Enable Petersburg at genesis. pub fn petersburg_activated(mut self) -> Self { self = self.constantinople_activated(); - self.hardforks.insert(Hardfork::Petersburg, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Petersburg, ForkCondition::Block(0)); self } /// Enable Istanbul at genesis. pub fn istanbul_activated(mut self) -> Self { self = self.petersburg_activated(); - self.hardforks.insert(Hardfork::Istanbul, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Istanbul, ForkCondition::Block(0)); self } /// Enable Berlin at genesis. pub fn berlin_activated(mut self) -> Self { self = self.istanbul_activated(); - self.hardforks.insert(Hardfork::Berlin, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Berlin, ForkCondition::Block(0)); self } /// Enable London at genesis. pub fn london_activated(mut self) -> Self { self = self.berlin_activated(); - self.hardforks.insert(Hardfork::London, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::London, ForkCondition::Block(0)); self } @@ -1253,7 +928,7 @@ impl ChainSpecBuilder { pub fn paris_activated(mut self) -> Self { self = self.london_activated(); self.hardforks.insert( - Hardfork::Paris, + EthereumHardfork::Paris, ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, ); self @@ -1262,14 +937,14 @@ impl ChainSpecBuilder { /// Enable Shanghai at genesis. 
pub fn shanghai_activated(mut self) -> Self { self = self.paris_activated(); - self.hardforks.insert(Hardfork::Shanghai, ForkCondition::Timestamp(0)); + self.hardforks.insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); self } /// Enable Cancun at genesis. pub fn cancun_activated(mut self) -> Self { self = self.shanghai_activated(); - self.hardforks.insert(Hardfork::Cancun, ForkCondition::Timestamp(0)); + self.hardforks.insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); self } @@ -1277,7 +952,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn bedrock_activated(mut self) -> Self { self = self.paris_activated(); - self.hardforks.insert(Hardfork::Bedrock, ForkCondition::Block(0)); + self.hardforks.insert(OptimismHardfork::Bedrock, ForkCondition::Block(0)); self } @@ -1285,7 +960,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn regolith_activated(mut self) -> Self { self = self.bedrock_activated(); - self.hardforks.insert(Hardfork::Regolith, ForkCondition::Timestamp(0)); + self.hardforks.insert(OptimismHardfork::Regolith, ForkCondition::Timestamp(0)); self } @@ -1294,8 +969,8 @@ impl ChainSpecBuilder { pub fn canyon_activated(mut self) -> Self { self = self.regolith_activated(); // Canyon also activates changes from L1's Shanghai hardfork - self.hardforks.insert(Hardfork::Shanghai, ForkCondition::Timestamp(0)); - self.hardforks.insert(Hardfork::Canyon, ForkCondition::Timestamp(0)); + self.hardforks.insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); + self.hardforks.insert(OptimismHardfork::Canyon, ForkCondition::Timestamp(0)); self } @@ -1303,8 +978,8 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn ecotone_activated(mut self) -> Self { self = self.canyon_activated(); - self.hardforks.insert(Hardfork::Cancun, ForkCondition::Timestamp(0)); - self.hardforks.insert(Hardfork::Ecotone, ForkCondition::Timestamp(0)); + self.hardforks.insert(EthereumHardfork::Cancun, 
ForkCondition::Timestamp(0)); + self.hardforks.insert(OptimismHardfork::Ecotone, ForkCondition::Timestamp(0)); self } @@ -1312,7 +987,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn fjord_activated(mut self) -> Self { self = self.ecotone_activated(); - self.hardforks.insert(Hardfork::Fjord, ForkCondition::Timestamp(0)); + self.hardforks.insert(OptimismHardfork::Fjord, ForkCondition::Timestamp(0)); self } @@ -1324,9 +999,9 @@ impl ChainSpecBuilder { /// [`Self::genesis`]) pub fn build(self) -> ChainSpec { let paris_block_and_final_difficulty = { - self.hardforks.get(&Hardfork::Paris).and_then(|cond| { + self.hardforks.get(EthereumHardfork::Paris).and_then(|cond| { if let ForkCondition::TTD { fork_block, total_difficulty } = cond { - fork_block.map(|fork_block| (fork_block, *total_difficulty)) + fork_block.map(|fork_block| (fork_block, total_difficulty)) } else { None } @@ -1354,275 +1029,6 @@ impl From<&Arc> for ChainSpecBuilder { } } -/// The condition at which a fork is activated. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] -pub enum ForkCondition { - /// The fork is activated after a certain block. - Block(BlockNumber), - /// The fork is activated after a total difficulty has been reached. - TTD { - /// The block number at which TTD is reached, if it is known. - /// - /// This should **NOT** be set unless you want this block advertised as [EIP-2124][eip2124] - /// `FORK_NEXT`. This is currently only the case for Sepolia and Holesky. - /// - /// [eip2124]: https://eips.ethereum.org/EIPS/eip-2124 - fork_block: Option, - /// The total difficulty after which the fork is activated. - total_difficulty: U256, - }, - /// The fork is activated after a specific timestamp. - Timestamp(u64), - /// The fork is never activated - #[default] - Never, -} - -impl ForkCondition { - /// Returns true if the fork condition is timestamp based. 
- pub const fn is_timestamp(&self) -> bool { - matches!(self, Self::Timestamp(_)) - } - - /// Checks whether the fork condition is satisfied at the given block. - /// - /// For TTD conditions, this will only return true if the activation block is already known. - /// - /// For timestamp conditions, this will always return false. - pub const fn active_at_block(&self, current_block: BlockNumber) -> bool { - matches!(self, Self::Block(block) - | Self::TTD { fork_block: Some(block), .. } if current_block >= *block) - } - - /// Checks if the given block is the first block that satisfies the fork condition. - /// - /// This will return false for any condition that is not block based. - pub const fn transitions_at_block(&self, current_block: BlockNumber) -> bool { - matches!(self, Self::Block(block) if current_block == *block) - } - - /// Checks whether the fork condition is satisfied at the given total difficulty and difficulty - /// of a current block. - /// - /// The fork is considered active if the _previous_ total difficulty is above the threshold. - /// To achieve that, we subtract the passed `difficulty` from the current block's total - /// difficulty, and check if it's above the Fork Condition's total difficulty (here: - /// `58_750_000_000_000_000_000_000`) - /// - /// This will return false for any condition that is not TTD-based. - pub fn active_at_ttd(&self, ttd: U256, difficulty: U256) -> bool { - matches!(self, Self::TTD { total_difficulty, .. } - if ttd.saturating_sub(difficulty) >= *total_difficulty) - } - - /// Checks whether the fork condition is satisfied at the given timestamp. - /// - /// This will return false for any condition that is not timestamp-based. - pub const fn active_at_timestamp(&self, timestamp: u64) -> bool { - matches!(self, Self::Timestamp(time) if timestamp >= *time) - } - - /// Checks whether the fork condition is satisfied at the given head block. 
- /// - /// This will return true if: - /// - /// - The condition is satisfied by the block number; - /// - The condition is satisfied by the timestamp; - /// - or the condition is satisfied by the total difficulty - pub fn active_at_head(&self, head: &Head) -> bool { - self.active_at_block(head.number) || - self.active_at_timestamp(head.timestamp) || - self.active_at_ttd(head.total_difficulty, head.difficulty) - } - - /// Get the total terminal difficulty for this fork condition. - /// - /// Returns `None` for fork conditions that are not TTD based. - pub const fn ttd(&self) -> Option { - match self { - Self::TTD { total_difficulty, .. } => Some(*total_difficulty), - _ => None, - } - } - - /// Returns the timestamp of the fork condition, if it is timestamp based. - pub const fn as_timestamp(&self) -> Option { - match self { - Self::Timestamp(timestamp) => Some(*timestamp), - _ => None, - } - } -} - -/// A container to pretty-print a hardfork. -/// -/// The fork is formatted depending on its fork condition: -/// -/// - Block and timestamp based forks are formatted in the same manner (`{name} <({eip})> -/// @{condition}`) -/// - TTD based forks are formatted separately as `{name} <({eip})> @{ttd} (network is known -/// to be merged)` -/// -/// An optional EIP can be attached to the fork to display as well. This should generally be in the -/// form of just `EIP-x`, e.g. `EIP-1559`. -#[derive(Debug)] -struct DisplayFork { - /// The name of the hardfork (e.g. Frontier) - name: String, - /// The fork condition - activated_at: ForkCondition, - /// An optional EIP (e.g. `EIP-1559`). 
- eip: Option, -} - -impl Display for DisplayFork { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - let name_with_eip = if let Some(eip) = &self.eip { - format!("{} ({})", self.name, eip) - } else { - self.name.clone() - }; - - match self.activated_at { - ForkCondition::Block(at) | ForkCondition::Timestamp(at) => { - write!(f, "{name_with_eip:32} @{at}")?; - } - ForkCondition::TTD { fork_block, total_difficulty } => { - write!( - f, - "{:32} @{} ({})", - name_with_eip, - total_difficulty, - if fork_block.is_some() { - "network is known to be merged" - } else { - "network is not known to be merged" - } - )?; - } - ForkCondition::Never => unreachable!(), - } - - Ok(()) - } -} - -/// A container for pretty-printing a list of hardforks. -/// -/// # Examples -/// -/// ``` -/// # use reth_primitives::MAINNET; -/// println!("{}", MAINNET.display_hardforks()); -/// ``` -/// -/// An example of the output: -/// -/// ```text -/// Pre-merge hard forks (block based): -// - Frontier @0 -// - Homestead @1150000 -// - Dao @1920000 -// - Tangerine @2463000 -// - SpuriousDragon @2675000 -// - Byzantium @4370000 -// - Constantinople @7280000 -// - Petersburg @7280000 -// - Istanbul @9069000 -// - MuirGlacier @9200000 -// - Berlin @12244000 -// - London @12965000 -// - ArrowGlacier @13773000 -// - GrayGlacier @15050000 -// Merge hard forks: -// - Paris @58750000000000000000000 (network is known to be merged) -// Post-merge hard forks (timestamp based): -// - Shanghai @1681338455 -/// ``` -#[derive(Debug)] -pub struct DisplayHardforks { - /// A list of pre-merge (block based) hardforks - pre_merge: Vec, - /// A list of merge (TTD based) hardforks - with_merge: Vec, - /// A list of post-merge (timestamp based) hardforks - post_merge: Vec, -} - -impl Display for DisplayHardforks { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - fn format( - header: &str, - forks: &[DisplayFork], - next_is_empty: bool, - f: &mut Formatter<'_>, - ) -> std::fmt::Result { - 
writeln!(f, "{header}:")?; - let mut iter = forks.iter().peekable(); - while let Some(fork) = iter.next() { - write!(f, "- {fork}")?; - if !next_is_empty || iter.peek().is_some() { - writeln!(f)?; - } - } - Ok(()) - } - - format( - "Pre-merge hard forks (block based)", - &self.pre_merge, - self.with_merge.is_empty(), - f, - )?; - - if !self.with_merge.is_empty() { - format("Merge hard forks", &self.with_merge, self.post_merge.is_empty(), f)?; - } - - if !self.post_merge.is_empty() { - format("Post-merge hard forks (timestamp based)", &self.post_merge, true, f)?; - } - - Ok(()) - } -} - -impl DisplayHardforks { - /// Creates a new [`DisplayHardforks`] from an iterator of hardforks. - pub fn new( - hardforks: &BTreeMap, - known_paris_block: Option, - ) -> Self { - let mut pre_merge = Vec::new(); - let mut with_merge = Vec::new(); - let mut post_merge = Vec::new(); - - for (fork, condition) in hardforks { - let mut display_fork = - DisplayFork { name: fork.to_string(), activated_at: *condition, eip: None }; - - match condition { - ForkCondition::Block(_) => { - pre_merge.push(display_fork); - } - ForkCondition::TTD { total_difficulty, .. } => { - display_fork.activated_at = ForkCondition::TTD { - fork_block: known_paris_block, - total_difficulty: *total_difficulty, - }; - with_merge.push(display_fork); - } - ForkCondition::Timestamp(_) => { - post_merge.push(display_fork); - } - ForkCondition::Never => continue, - } - } - - Self { pre_merge, with_merge, post_merge } - } -} - /// `PoS` deposit contract details. #[derive(Debug, Clone, PartialEq, Eq)] pub struct DepositContract { @@ -1641,94 +1047,73 @@ impl DepositContract { } } +/// Genesis info for Optimism. 
#[cfg(feature = "optimism")] +#[derive(Default, Debug, serde::Deserialize)] +#[serde(rename_all = "camelCase")] struct OptimismGenesisInfo { - bedrock_block: Option, - regolith_time: Option, - canyon_time: Option, - ecotone_time: Option, - fjord_time: Option, + optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo, + #[serde(skip)] base_fee_params: BaseFeeParamsKind, } #[cfg(feature = "optimism")] impl OptimismGenesisInfo { fn extract_from(genesis: &Genesis) -> Self { - let optimism_config = - genesis.config.extra_fields.get("optimism").and_then(|value| value.as_object()); - - let eip1559_elasticity = optimism_config - .and_then(|config| config.get("eip1559Elasticity")) - .and_then(|value| value.as_u64()); - - let eip1559_denominator = optimism_config - .and_then(|config| config.get("eip1559Denominator")) - .and_then(|value| value.as_u64()); - - let eip1559_denominator_canyon = optimism_config - .and_then(|config| config.get("eip1559DenominatorCanyon")) - .and_then(|value| value.as_u64()); - - let base_fee_params = if let (Some(elasticity), Some(denominator)) = - (eip1559_elasticity, eip1559_denominator) - { - if let Some(canyon_denominator) = eip1559_denominator_canyon { - BaseFeeParamsKind::Variable( - vec![ - ( - Hardfork::London, - BaseFeeParams::new(denominator as u128, elasticity as u128), - ), - ( - Hardfork::Canyon, - BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), - ), - ] - .into(), - ) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128).into() - } - } else { - BaseFeeParams::ethereum().into() + let mut info = Self { + optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo::extract_from( + &genesis.config.extra_fields, + ) + .unwrap_or_default(), + ..Default::default() }; + if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info { + if let (Some(elasticity), Some(denominator)) = ( + optimism_base_fee_info.eip1559_elasticity, + 
optimism_base_fee_info.eip1559_denominator, + ) { + let base_fee_params = if let Some(canyon_denominator) = + optimism_base_fee_info.eip1559_denominator_canyon + { + BaseFeeParamsKind::Variable( + vec![ + ( + EthereumHardfork::London.boxed(), + BaseFeeParams::new(denominator as u128, elasticity as u128), + ), + ( + OptimismHardfork::Canyon.boxed(), + BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), + ), + ] + .into(), + ) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128).into() + }; - Self { - bedrock_block: genesis - .config - .extra_fields - .get("bedrockBlock") - .and_then(|value| value.as_u64()), - regolith_time: genesis - .config - .extra_fields - .get("regolithTime") - .and_then(|value| value.as_u64()), - canyon_time: genesis - .config - .extra_fields - .get("canyonTime") - .and_then(|value| value.as_u64()), - ecotone_time: genesis - .config - .extra_fields - .get("ecotoneTime") - .and_then(|value| value.as_u64()), - fjord_time: genesis - .config - .extra_fields - .get("fjordTime") - .and_then(|value| value.as_u64()), - base_fee_params, + info.base_fee_params = base_fee_params; + } } + + info } } #[cfg(test)] mod tests { use super::*; - use crate::{b256, hex, proofs::IntoTrieAccount, ChainConfig, GenesisAccount}; + use alloy_chains::Chain; + use alloy_genesis::{ChainConfig, GenesisAccount}; + use alloy_primitives::{b256, hex}; + use core::ops::Deref; + use reth_ethereum_forks::{ForkCondition, ForkHash, ForkId, Head}; + use reth_trie_common::TrieAccount; use std::{collections::HashMap, str::FromStr}; + + #[cfg(feature = "optimism")] + use reth_ethereum_forks::OptimismHardforks; + fn test_fork_ids(spec: &ChainSpec, cases: &[(Head, ForkId)]) { for (block, expected_id) in cases { let computed_id = spec.fork_id(block); @@ -1740,14 +1125,14 @@ mod tests { } } - fn test_hardfork_fork_ids(spec: &ChainSpec, cases: &[(Hardfork, ForkId)]) { + fn test_hardfork_fork_ids(spec: &ChainSpec, cases: &[(EthereumHardfork, ForkId)]) { for 
(hardfork, expected_id) in cases { if let Some(computed_id) = spec.hardfork_fork_id(*hardfork) { assert_eq!( expected_id, &computed_id, "Expected fork ID {expected_id:?}, computed fork ID {computed_id:?} for hardfork {hardfork}" ); - if matches!(hardfork, Hardfork::Shanghai) { + if matches!(hardfork, EthereumHardfork::Shanghai) { if let Some(shangai_id) = spec.shanghai_fork_id() { assert_eq!( expected_id, &shangai_id, @@ -1793,8 +1178,8 @@ Post-merge hard forks (timestamp based): let spec = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(Genesis::default()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Shanghai, ForkCondition::Never) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Never) .build(); assert_eq!( spec.display_hardforks().to_string(), @@ -1809,21 +1194,21 @@ Post-merge hard forks (timestamp based): let spec = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(Genesis::default()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(0)) - .with_fork(Hardfork::Tangerine, ForkCondition::Block(0)) - .with_fork(Hardfork::SpuriousDragon, ForkCondition::Block(0)) - .with_fork(Hardfork::Byzantium, ForkCondition::Block(0)) - .with_fork(Hardfork::Constantinople, ForkCondition::Block(0)) - .with_fork(Hardfork::Istanbul, ForkCondition::Block(0)) - .with_fork(Hardfork::MuirGlacier, ForkCondition::Block(0)) - .with_fork(Hardfork::Berlin, ForkCondition::Block(0)) - .with_fork(Hardfork::London, ForkCondition::Block(0)) - .with_fork(Hardfork::ArrowGlacier, ForkCondition::Block(0)) - .with_fork(Hardfork::GrayGlacier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Tangerine, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::SpuriousDragon, 
ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Byzantium, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Constantinople, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Istanbul, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::MuirGlacier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Berlin, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::London, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::ArrowGlacier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::GrayGlacier, ForkCondition::Block(0)) .build(); - assert_eq!(spec.hardforks().len(), 12, "12 forks should be active."); + assert_eq!(spec.deref().len(), 12, "12 forks should be active."); assert_eq!( spec.fork_id(&Head { number: 1, ..Default::default() }), ForkId { hash: ForkHash::from(spec.genesis_hash()), next: 0 }, @@ -1837,16 +1222,16 @@ Post-merge hard forks (timestamp based): let unique_spec = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(1)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(1)) .build(); let duplicate_spec = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(1)) - .with_fork(Hardfork::Tangerine, ForkCondition::Block(1)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(1)) + .with_fork(EthereumHardfork::Tangerine, ForkCondition::Block(1)) .build(); assert_eq!( @@ -1863,9 +1248,9 @@ Post-merge hard forks (timestamp based): let happy_path_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - 
.with_fork(Hardfork::Homestead, ForkCondition::Block(73)) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123)) .build(); let happy_path_head = happy_path_case.satisfy(ForkCondition::Timestamp(11313123)); let happy_path_expected = Head { number: 73, timestamp: 11313123, ..Default::default() }; @@ -1877,10 +1262,10 @@ Post-merge hard forks (timestamp based): let multiple_timestamp_fork_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(73)) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123)) - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(11313398)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(11313398)) .build(); let multi_timestamp_head = multiple_timestamp_fork_case.satisfy(ForkCondition::Timestamp(11313398)); @@ -1894,7 +1279,7 @@ Post-merge hard forks (timestamp based): let no_block_fork_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123)) .build(); let no_block_fork_head = no_block_fork_case.satisfy(ForkCondition::Timestamp(11313123)); let no_block_fork_expected = Head { number: 0, timestamp: 11313123, ..Default::default() }; @@ -1906,16 +1291,16 @@ Post-merge hard forks (timestamp based): let fork_cond_ttd_blocknum_case = ChainSpec::builder() .chain(Chain::mainnet()) 
.genesis(empty_genesis.clone()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(73)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73)) .with_fork( - Hardfork::Paris, + EthereumHardfork::Paris, ForkCondition::TTD { fork_block: Some(101), total_difficulty: U256::from(10_790_000), }, ) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123)) .build(); let fork_cond_ttd_blocknum_head = fork_cond_ttd_blocknum_case.satisfy(ForkCondition::Timestamp(11313123)); @@ -1932,8 +1317,8 @@ Post-merge hard forks (timestamp based): let fork_cond_block_only_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(73)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73)) .build(); let fork_cond_block_only_head = fork_cond_block_only_case.satisfy(ForkCondition::Block(73)); let fork_cond_block_only_expected = Head { number: 73, ..Default::default() }; @@ -1961,117 +1346,69 @@ Post-merge hard forks (timestamp based): &MAINNET, &[ ( - Hardfork::Frontier, + EthereumHardfork::Frontier, ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 }, ), ( - Hardfork::Homestead, + EthereumHardfork::Homestead, ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 }, ), - (Hardfork::Dao, ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }), ( - Hardfork::Tangerine, + EthereumHardfork::Dao, + ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }, + ), + ( + EthereumHardfork::Tangerine, ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 }, ), ( - Hardfork::SpuriousDragon, + 
EthereumHardfork::SpuriousDragon, ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 }, ), ( - Hardfork::Byzantium, + EthereumHardfork::Byzantium, ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 }, ), ( - Hardfork::Constantinople, + EthereumHardfork::Constantinople, ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, ), ( - Hardfork::Petersburg, + EthereumHardfork::Petersburg, ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, ), ( - Hardfork::Istanbul, + EthereumHardfork::Istanbul, ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 }, ), ( - Hardfork::MuirGlacier, + EthereumHardfork::MuirGlacier, ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 }, ), ( - Hardfork::Berlin, + EthereumHardfork::Berlin, ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 }, ), ( - Hardfork::London, + EthereumHardfork::London, ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 }, ), ( - Hardfork::ArrowGlacier, + EthereumHardfork::ArrowGlacier, ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 }, ), ( - Hardfork::GrayGlacier, + EthereumHardfork::GrayGlacier, ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 }, ), ( - Hardfork::Shanghai, + EthereumHardfork::Shanghai, ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 }, ), - (Hardfork::Cancun, ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 0 }), - ], - ); - } - - #[test] - fn goerli_hardfork_fork_ids() { - test_hardfork_fork_ids( - &GOERLI, - &[ - ( - Hardfork::Frontier, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Homestead, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Tangerine, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::SpuriousDragon, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - 
Hardfork::Byzantium, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Constantinople, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Petersburg, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), ( - Hardfork::Istanbul, - ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 }, - ), - ( - Hardfork::Berlin, - ForkId { hash: ForkHash([0x75, 0x7a, 0x1c, 0x47]), next: 5062605 }, - ), - ( - Hardfork::London, - ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 }, - ), - ( - Hardfork::Shanghai, - ForkId { hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 }, + EthereumHardfork::Cancun, + ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 0 }, ), - (Hardfork::Cancun, ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 }), ], ); } @@ -2082,54 +1419,57 @@ Post-merge hard forks (timestamp based): &SEPOLIA, &[ ( - Hardfork::Frontier, + EthereumHardfork::Frontier, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Homestead, + EthereumHardfork::Homestead, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Tangerine, + EthereumHardfork::Tangerine, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::SpuriousDragon, + EthereumHardfork::SpuriousDragon, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Byzantium, + EthereumHardfork::Byzantium, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Constantinople, + EthereumHardfork::Constantinople, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Petersburg, + EthereumHardfork::Petersburg, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Istanbul, + EthereumHardfork::Istanbul, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 
}, ), ( - Hardfork::Berlin, + EthereumHardfork::Berlin, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::London, + EthereumHardfork::London, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Paris, + EthereumHardfork::Paris, ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 }, ), ( - Hardfork::Shanghai, + EthereumHardfork::Shanghai, ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 }, ), - (Hardfork::Cancun, ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 0 }), + ( + EthereumHardfork::Cancun, + ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 0 }, + ), ], ); } @@ -2248,63 +1588,6 @@ Post-merge hard forks (timestamp based): ) } - #[test] - fn goerli_forkids() { - test_fork_ids( - &GOERLI, - &[ - ( - Head { number: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Head { number: 1561650, ..Default::default() }, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Head { number: 1561651, ..Default::default() }, - ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 }, - ), - ( - Head { number: 4460643, ..Default::default() }, - ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 }, - ), - ( - Head { number: 4460644, ..Default::default() }, - ForkId { hash: ForkHash([0x75, 0x7a, 0x1c, 0x47]), next: 5062605 }, - ), - ( - Head { number: 5062605, ..Default::default() }, - ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 }, - ), - ( - Head { number: 6000000, timestamp: 1678832735, ..Default::default() }, - ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 }, - ), - // First Shanghai block - ( - Head { number: 6000001, timestamp: 1678832736, ..Default::default() }, - ForkId { hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 }, - ), - // Future Shanghai block - ( - Head { number: 6500002, 
timestamp: 1678832736, ..Default::default() }, - ForkId { hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 }, - ), - // First Cancun block - ( - Head { number: 6500003, timestamp: 1705473120, ..Default::default() }, - ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 }, - ), - // Future Cancun block - ( - Head { number: 6500003, timestamp: 2705473120, ..Default::default() }, - ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 }, - ), - ], - ); - } - #[test] fn sepolia_forkids() { test_fork_ids( @@ -2630,8 +1913,8 @@ Post-merge hard forks (timestamp based): cancun_time: u64, ) -> ChainSpec { builder - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(shanghai_time)) - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(cancun_time)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(shanghai_time)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(cancun_time)) .build() } @@ -2686,14 +1969,14 @@ Post-merge hard forks (timestamp based): let terminal_block_ttd = U256::from(58750003716598352816469_u128); let terminal_block_difficulty = U256::from(11055787484078698_u128); assert!(!chainspec - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(terminal_block_ttd, terminal_block_difficulty)); // Check that Paris is active on first PoS block #15537394. 
let first_pos_block_ttd = U256::from(58750003716598352816469_u128); let first_pos_difficulty = U256::ZERO; assert!(chainspec - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(first_pos_block_ttd, first_pos_difficulty)); } @@ -2769,55 +2052,64 @@ Post-merge hard forks (timestamp based): // assert a bunch of hardforks that should be set assert_eq!( - chainspec.hardforks.get(&Hardfork::Homestead).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Homestead).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::Tangerine).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Tangerine).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::SpuriousDragon).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::SpuriousDragon).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::Byzantium).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Byzantium).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::Constantinople).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Constantinople).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::Petersburg).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Petersburg).unwrap(), + ForkCondition::Block(0) ); - assert_eq!(chainspec.hardforks.get(&Hardfork::Istanbul).unwrap(), &ForkCondition::Block(0)); assert_eq!( - chainspec.hardforks.get(&Hardfork::MuirGlacier).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Istanbul).unwrap(), + ForkCondition::Block(0) ); - assert_eq!(chainspec.hardforks.get(&Hardfork::Berlin).unwrap(), &ForkCondition::Block(0)); - assert_eq!(chainspec.hardforks.get(&Hardfork::London).unwrap(), 
&ForkCondition::Block(0)); assert_eq!( - chainspec.hardforks.get(&Hardfork::ArrowGlacier).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::MuirGlacier).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::GrayGlacier).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Berlin).unwrap(), + ForkCondition::Block(0) + ); + assert_eq!( + chainspec.hardforks.get(EthereumHardfork::London).unwrap(), + ForkCondition::Block(0) + ); + assert_eq!( + chainspec.hardforks.get(EthereumHardfork::ArrowGlacier).unwrap(), + ForkCondition::Block(0) + ); + assert_eq!( + chainspec.hardforks.get(EthereumHardfork::GrayGlacier).unwrap(), + ForkCondition::Block(0) ); // including time based hardforks assert_eq!( - chainspec.hardforks.get(&Hardfork::Shanghai).unwrap(), - &ForkCondition::Timestamp(0) + chainspec.hardforks.get(EthereumHardfork::Shanghai).unwrap(), + ForkCondition::Timestamp(0) ); // including time based hardforks assert_eq!( - chainspec.hardforks.get(&Hardfork::Cancun).unwrap(), - &ForkCondition::Timestamp(1) + chainspec.hardforks.get(EthereumHardfork::Cancun).unwrap(), + ForkCondition::Timestamp(1) ); // alloc key -> expected rlp mapping @@ -2829,10 +2121,7 @@ Post-merge hard forks (timestamp based): for (key, expected_rlp) in key_rlp { let account = chainspec.genesis.alloc.get(&key).expect("account should exist"); - assert_eq!( - &alloy_rlp::encode(IntoTrieAccount::to_trie_account(account.clone())), - expected_rlp - ); + assert_eq!(&alloy_rlp::encode(TrieAccount::from(account.clone())), expected_rlp); } assert_eq!(chainspec.genesis_hash, None); @@ -2908,8 +2197,7 @@ Post-merge hard forks (timestamp based): } "#; - let _genesis = serde_json::from_str::(hive_json).unwrap(); - let genesis = serde_json::from_str::(hive_json).unwrap(); + let genesis = serde_json::from_str::(hive_json).unwrap(); let chainspec: ChainSpec = genesis.into(); assert_eq!(chainspec.genesis_hash, None); 
assert_eq!(chainspec.chain, Chain::from_named(NamedChain::Optimism)); @@ -2917,14 +2205,14 @@ Post-merge hard forks (timestamp based): hex!("9a6049ac535e3dc7436c189eaa81c73f35abd7f282ab67c32944ff0301d63360").into(); assert_eq!(chainspec.genesis_header().state_root, expected_state_root); let hard_forks = vec![ - Hardfork::Byzantium, - Hardfork::Homestead, - Hardfork::Istanbul, - Hardfork::Petersburg, - Hardfork::Constantinople, + EthereumHardfork::Byzantium, + EthereumHardfork::Homestead, + EthereumHardfork::Istanbul, + EthereumHardfork::Petersburg, + EthereumHardfork::Constantinople, ]; - for ref fork in hard_forks { - assert_eq!(chainspec.hardforks.get(fork).unwrap(), &ForkCondition::Block(0)); + for fork in hard_forks { + assert_eq!(chainspec.hardforks.get(fork).unwrap(), ForkCondition::Block(0)); } let expected_hash: B256 = @@ -3094,13 +2382,7 @@ Post-merge hard forks (timestamp based): #[test] fn test_parse_prague_genesis_all_formats() { let s = r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0,"cancunTime":4661, "pragueTime": 
4662},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x3b9aca00"}"#; - let genesis: AllGenesisFormats = serde_json::from_str(s).unwrap(); - - // this should be the genesis format - let genesis = match genesis { - AllGenesisFormats::Geth(genesis) => genesis, - _ => panic!("expected 
geth genesis format"), - }; + let genesis: Genesis = serde_json::from_str(s).unwrap(); // assert that the alloc was picked up let acc = genesis @@ -3117,13 +2399,7 @@ Post-merge hard forks (timestamp based): #[test] fn test_parse_cancun_genesis_all_formats() { let s = r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0,"cancunTime":4661},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x020000000000000
0000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x3b9aca00"}"#; - let genesis: AllGenesisFormats = serde_json::from_str(s).unwrap(); - - // this should be the genesis format - let genesis = match genesis { - AllGenesisFormats::Geth(genesis) => genesis, - _ => panic!("expected geth genesis format"), - }; + let genesis: Genesis = serde_json::from_str(s).unwrap(); // assert that the alloc was picked up let acc = genesis @@ -3185,12 +2461,12 @@ Post-merge hard forks (timestamp based): #[test] fn holesky_paris_activated_at_genesis() { assert!(HOLESKY - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(HOLESKY.genesis.difficulty, HOLESKY.genesis.difficulty)); } #[test] - fn test_all_genesis_formats_deserialization() { + fn test_genesis_format_deserialization() { // custom genesis with chain config let config = ChainConfig { chain_id: 2600, @@ -3228,22 +2504,9 @@ Post-merge hard forks (timestamp based): // ensure genesis is deserialized correctly let serialized_genesis = serde_json::to_string(&genesis).unwrap(); - let deserialized_genesis: AllGenesisFormats = - serde_json::from_str(&serialized_genesis).unwrap(); - assert!(matches!(deserialized_genesis, AllGenesisFormats::Geth(_))); + let deserialized_genesis: Genesis = serde_json::from_str(&serialized_genesis).unwrap(); - // build chain - let chain_spec = ChainSpecBuilder::default() - .chain(2600.into()) - .genesis(genesis) - .cancun_activated() - .build(); - - // ensure chain spec is deserialized correctly - let serialized_chain_spec = serde_json::to_string(&chain_spec).unwrap(); - let deserialized_chain_spec: AllGenesisFormats = - serde_json::from_str(&serialized_chain_spec).unwrap(); - 
assert!(matches!(deserialized_chain_spec, AllGenesisFormats::Reth(_))) + assert_eq!(genesis, deserialized_genesis); } #[test] @@ -3252,13 +2515,16 @@ Post-merge hard forks (timestamp based): chain: Chain::mainnet(), genesis: Genesis::default(), genesis_hash: None, - hardforks: BTreeMap::from([(Hardfork::Frontier, ForkCondition::Never)]), + hardforks: ChainHardforks::new(vec![( + EthereumHardfork::Frontier.boxed(), + ForkCondition::Never, + )]), paris_block_and_final_difficulty: None, deposit_contract: None, ..Default::default() }; - assert_eq!(spec.hardfork_fork_id(Hardfork::Frontier), None); + assert_eq!(spec.hardfork_fork_id(EthereumHardfork::Frontier), None); } #[test] @@ -3267,13 +2533,16 @@ Post-merge hard forks (timestamp based): chain: Chain::mainnet(), genesis: Genesis::default(), genesis_hash: None, - hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Never)]), + hardforks: ChainHardforks::new(vec![( + EthereumHardfork::Shanghai.boxed(), + ForkCondition::Never, + )]), paris_block_and_final_difficulty: None, deposit_contract: None, ..Default::default() }; - assert_eq!(spec.hardfork_fork_filter(Hardfork::Shanghai), None); + assert_eq!(spec.hardfork_fork_filter(EthereumHardfork::Shanghai), None); } #[test] @@ -3391,17 +2660,17 @@ Post-merge hard forks (timestamp based): BaseFeeParamsKind::Constant(BaseFeeParams::new(70, 60)) ); - assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 0)); + assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); + 
assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); - assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 50)); + assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); } #[cfg(feature = "optimism")] @@ -3452,23 +2721,100 @@ Post-merge hard forks (timestamp based): chain_spec.base_fee_params, BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, BaseFeeParams::new(70, 60)), - (Hardfork::Canyon, BaseFeeParams::new(80, 60)), + (EthereumHardfork::London.boxed(), BaseFeeParams::new(70, 60)), + (OptimismHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)), ] .into() ) ); - assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 0)); - - assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 40)); - 
assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 50)); + assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); + + assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); + } + + #[cfg(feature = "optimism")] + #[test] + fn parse_genesis_optimism_with_variable_base_fee_params() { + use op_alloy_rpc_types::genesis::OptimismBaseFeeInfo; + + let geth_genesis = r#" + { + "config": { + "chainId": 8453, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "mergeNetsplitBlock": 0, + "bedrockBlock": 0, + "regolithTime": 15, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "optimism": { + "eip1559Elasticity": 6, + "eip1559Denominator": 50 + } + } + } + "#; + let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap(); + let chainspec = ChainSpec::from(genesis.clone()); + + let actual_chain_id = genesis.config.chain_id; + assert_eq!(actual_chain_id, 8453); + + assert_eq!( + chainspec.hardforks.get(EthereumHardfork::Istanbul), + Some(ForkCondition::Block(0)) + ); + + let actual_bedrock_block = 
genesis.config.extra_fields.get("bedrockBlock"); + assert_eq!(actual_bedrock_block, Some(serde_json::Value::from(0)).as_ref()); + let actual_canyon_timestamp = genesis.config.extra_fields.get("canyonTime"); + assert_eq!(actual_canyon_timestamp, None); + + assert!(genesis.config.terminal_total_difficulty_passed); + + let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); + let optimism_base_fee_info = + serde_json::from_value::(optimism_object.clone()).unwrap(); + + assert_eq!( + optimism_base_fee_info, + OptimismBaseFeeInfo { + eip1559_elasticity: Some(6), + eip1559_denominator: Some(50), + eip1559_denominator_canyon: None, + } + ); + assert_eq!( + chainspec.base_fee_params, + BaseFeeParamsKind::Constant(BaseFeeParams { + max_change_denominator: 50, + elasticity_multiplier: 6, + }) + ); + + assert!(chainspec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); + + assert!(chainspec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); } } diff --git a/crates/cli/cli/Cargo.toml b/crates/cli/cli/Cargo.toml new file mode 100644 index 000000000000..83ea9da6f9bb --- /dev/null +++ b/crates/cli/cli/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "reth-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] + + +[dependencies] +# reth +reth-cli-runner.workspace = true +reth-chainspec.workspace = true +eyre.workspace = true + +# misc +clap.workspace = true diff --git a/crates/cli/cli/src/chainspec.rs b/crates/cli/cli/src/chainspec.rs new file mode 100644 index 000000000000..4c1b4372fd0b --- /dev/null +++ b/crates/cli/cli/src/chainspec.rs @@ -0,0 +1,25 @@ +use clap::builder::TypedValueParser; +use reth_chainspec::ChainSpec; +use std::sync::Arc; + +/// Trait for parsing chain specifications. +/// +/// This trait extends [`clap::builder::TypedValueParser`] to provide a parser for chain +/// specifications. 
Implementers of this trait must provide a list of supported chains and a +/// function to parse a given string into a [`ChainSpec`]. +pub trait ChainSpecParser: TypedValueParser> + Default { + /// List of supported chains. + const SUPPORTED_CHAINS: &'static [&'static str]; + + /// Parses the given string into a [`ChainSpec`]. + /// + /// # Arguments + /// + /// * `s` - A string slice that holds the chain spec to be parsed. + /// + /// # Errors + /// + /// This function will return an error if the input string cannot be parsed into a valid + /// [`ChainSpec`]. + fn parse(&self, s: &str) -> eyre::Result>; +} diff --git a/crates/cli/cli/src/lib.rs b/crates/cli/cli/src/lib.rs new file mode 100644 index 000000000000..9e078e82f221 --- /dev/null +++ b/crates/cli/cli/src/lib.rs @@ -0,0 +1,70 @@ +//! Cli abstraction for reth based nodes. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use std::{borrow::Cow, ffi::OsString}; + +use reth_cli_runner::CliRunner; + +use clap::{Error, Parser}; + +pub mod chainspec; + +/// Reth based node cli. +/// +/// This trait is supposed to be implemented by the main struct of the CLI. +/// +/// It provides commonly used functionality for running commands and information about the CL, such +/// as the name and version. +pub trait RethCli: Sized { + /// The name of the implementation, eg. `reth`, `op-reth`, etc. + fn name(&self) -> Cow<'static, str>; + + /// The version of the node, such as `reth/v1.0.0` + fn version(&self) -> Cow<'static, str>; + + /// Parse args from iterator from [`std::env::args_os()`]. 
+ fn parse_args() -> Result + where + Self: Parser + Sized, + { + ::try_parse_from(std::env::args_os()) + } + + /// Parse args from the given iterator. + fn try_parse_from(itr: I) -> Result + where + Self: Parser + Sized, + I: IntoIterator, + T: Into + Clone, + { + ::try_parse_from(itr) + } + + /// Executes a command. + fn with_runner(self, f: F) -> R + where + F: FnOnce(Self, CliRunner) -> R, + { + let runner = CliRunner::default(); + + f(self, runner) + } + + /// Parses and executes a command. + fn execute(f: F) -> Result + where + Self: Parser + Sized, + F: FnOnce(Self, CliRunner) -> R, + { + let cli = Self::parse_args()?; + + Ok(cli.with_runner(f)) + } +} diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml new file mode 100644 index 000000000000..1bb1a4e00e2f --- /dev/null +++ b/crates/cli/commands/Cargo.toml @@ -0,0 +1,78 @@ +[package] +name = "reth-cli-commands" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] + +[dependencies] +reth-beacon-consensus.workspace = true +reth-chainspec.workspace = true +reth-cli-runner.workspace = true +reth-cli-util.workspace = true +reth-config.workspace = true +reth-consensus.workspace = true +reth-db = { workspace = true, features = ["mdbx"] } +reth-db-api.workspace = true +reth-db-common.workspace = true +reth-downloaders.workspace = true +reth-evm.workspace = true +reth-exex.workspace = true +reth-fs-util.workspace = true +reth-network = { workspace = true, features = ["serde"] } +reth-network-p2p.workspace = true +reth-node-core.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-prune.workspace = true +reth-stages.workspace = true +reth-static-file-types.workspace = true +reth-static-file.workspace = true +reth-trie = { workspace = true, features = ["metrics"] } + +tokio.workspace = true +itertools.workspace = true + +# misc +ahash = 
"0.8" +human_bytes = "0.4.1" +eyre.workspace = true +clap = { workspace = true, features = ["derive", "env"] } +serde.workspace = true +serde_json.workspace = true +tracing.workspace = true +backon.workspace = true + +# io +fdlimit.workspace = true +confy.workspace = true +toml = { workspace = true, features = ["display"] } + +# tui +comfy-table = "7.0" +crossterm = "0.27.0" +ratatui = { version = "0.27", default-features = false, features = [ + "crossterm", +] } + +# metrics +metrics-process.workspace = true + +# reth test-vectors +proptest = { workspace = true, optional = true } +arbitrary = { workspace = true, optional = true } +proptest-arbitrary-interop = { workspace = true, optional = true } + +[features] +default = [] +dev = [ + "dep:proptest", + "dep:arbitrary", + "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "reth-db-api/arbitrary" +] diff --git a/bin/reth/src/commands/common.rs b/crates/cli/commands/src/common.rs similarity index 88% rename from bin/reth/src/commands/common.rs rename to crates/cli/commands/src/common.rs index be12fa3156db..ce733e938cfd 100644 --- a/bin/reth/src/commands/common.rs +++ b/crates/cli/commands/src/common.rs @@ -2,6 +2,7 @@ use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; +use reth_chainspec::ChainSpec; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; @@ -9,18 +10,17 @@ use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHe use reth_evm::noop::NoopBlockExecutorProvider; use reth_node_core::{ args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, DatadirArgs, }, dirs::{ChainPath, DataDirPath}, }; -use reth_primitives::ChainSpec; -use reth_provider::{ - providers::StaticFileProvider, HeaderSyncMode, ProviderFactory, StaticFileProviderFactory, -}; +use reth_primitives::B256; +use 
reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; +use tokio::sync::watch; use tracing::{debug, info, warn}; /// Struct to hold config and datadir paths @@ -42,7 +42,7 @@ pub struct EnvironmentArgs { value_name = "CHAIN_OR_PATH", long_help = chain_help(), default_value = SUPPORTED_CHAINS[0], - value_parser = genesis_value_parser + value_parser = chain_value_parser )] pub chain: Arc, @@ -65,7 +65,11 @@ impl EnvironmentArgs { } let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); - let mut config: Config = confy::load_path(config_path).unwrap_or_default(); + let mut config: Config = confy::load_path(config_path) + .inspect_err( + |err| warn!(target: "reth::cli", %err, "Failed to load config file, using default"), + ) + .unwrap_or_default(); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { @@ -105,7 +109,10 @@ impl EnvironmentArgs { static_file_provider: StaticFileProvider, ) -> eyre::Result>> { let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); - let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider); + let prune_modes = + config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); + let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider) + .with_prune_modes(prune_modes.clone()); info!(target: "reth::cli", "Verifying storage consistency."); @@ -119,19 +126,19 @@ impl EnvironmentArgs { return Ok(factory); } - let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); - // Highly unlikely to happen, and given its destructive nature, it's better to panic // instead. 
assert_ne!(unwind_target, PipelineTarget::Unwind(0), "A static file <> database inconsistency was found that would trigger an unwind to block 0"); info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); + let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); + // Builds and executes an unwind-only pipeline let mut pipeline = Pipeline::builder() .add_stages(DefaultStages::new( factory.clone(), - HeaderSyncMode::Continuous, + tip_rx, Arc::new(EthBeaconConsensus::new(self.chain.clone())), NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), diff --git a/bin/reth/src/commands/config_cmd.rs b/crates/cli/commands/src/config_cmd.rs similarity index 100% rename from bin/reth/src/commands/config_cmd.rs rename to crates/cli/commands/src/config_cmd.rs diff --git a/bin/reth/src/commands/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs similarity index 97% rename from bin/reth/src/commands/db/checksum.rs rename to crates/cli/commands/src/db/checksum.rs index b0dbb1f7732b..766f69041587 100644 --- a/bin/reth/src/commands/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -1,11 +1,9 @@ -use crate::{ - commands::db::get::{maybe_json_value_parser, table_key}, - utils::DbTool, -}; +use crate::db::get::{maybe_json_value_parser, table_key}; use ahash::RandomState; use clap::Parser; use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx}; +use reth_db_common::DbTool; use std::{ hash::{BuildHasher, Hasher}, sync::Arc, diff --git a/bin/reth/src/commands/db/clear.rs b/crates/cli/commands/src/db/clear.rs similarity index 96% rename from bin/reth/src/commands/db/clear.rs rename to crates/cli/commands/src/db/clear.rs index 76c1b97e38ad..b9edf458d3f4 100644 --- a/bin/reth/src/commands/db/clear.rs +++ b/crates/cli/commands/src/db/clear.rs @@ -5,8 +5,8 @@ use reth_db_api::{ table::Table, 
transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{static_file::find_fixed_range, StaticFileSegment}; use reth_provider::{ProviderFactory, StaticFileProviderFactory}; +use reth_static_file_types::{find_fixed_range, StaticFileSegment}; /// The arguments for the `reth db clear` command #[derive(Parser, Debug)] diff --git a/bin/reth/src/commands/db/diff.rs b/crates/cli/commands/src/db/diff.rs similarity index 99% rename from bin/reth/src/commands/db/diff.rs rename to crates/cli/commands/src/db/diff.rs index fda004f3c34c..41c3ab0e911c 100644 --- a/bin/reth/src/commands/db/diff.rs +++ b/crates/cli/commands/src/db/diff.rs @@ -1,11 +1,11 @@ -use crate::{ - args::DatabaseArgs, - dirs::{DataDirPath, PlatformPath}, - utils::DbTool, -}; use clap::Parser; use reth_db::{open_db_read_only, tables_to_generic, DatabaseEnv, Tables}; use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx}; +use reth_db_common::DbTool; +use reth_node_core::{ + args::DatabaseArgs, + dirs::{DataDirPath, PlatformPath}, +}; use std::{ collections::HashMap, fmt::Debug, diff --git a/bin/reth/src/commands/db/get.rs b/crates/cli/commands/src/db/get.rs similarity index 98% rename from bin/reth/src/commands/db/get.rs rename to crates/cli/commands/src/db/get.rs index 699a31471802..cd721a1db4b1 100644 --- a/bin/reth/src/commands/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,4 +1,3 @@ -use crate::utils::DbTool; use clap::Parser; use reth_db::{ static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask}, @@ -8,8 +7,10 @@ use reth_db_api::{ database::Database, table::{Decompress, DupSort, Table}, }; -use reth_primitives::{BlockHash, Header, StaticFileSegment}; +use reth_db_common::DbTool; +use reth_primitives::{BlockHash, Header}; use reth_provider::StaticFileProviderFactory; +use reth_static_file_types::StaticFileSegment; use tracing::error; /// The arguments for the `reth db get` command diff --git 
a/bin/reth/src/commands/db/list.rs b/crates/cli/commands/src/db/list.rs similarity index 99% rename from bin/reth/src/commands/db/list.rs rename to crates/cli/commands/src/db/list.rs index 4689bbfdc0fa..59b3397154af 100644 --- a/bin/reth/src/commands/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -1,9 +1,9 @@ use super::tui::DbListTUI; -use crate::utils::{DbTool, ListFilter}; use clap::Parser; use eyre::WrapErr; use reth_db::{DatabaseEnv, RawValue, TableViewer, Tables}; use reth_db_api::{database::Database, table::Table}; +use reth_db_common::{DbTool, ListFilter}; use reth_primitives::hex; use std::{cell::RefCell, sync::Arc}; use tracing::error; diff --git a/bin/reth/src/commands/db/mod.rs b/crates/cli/commands/src/db/mod.rs similarity index 92% rename from bin/reth/src/commands/db/mod.rs rename to crates/cli/commands/src/db/mod.rs index b4e4ded41aed..cba32fa5e55b 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -1,11 +1,7 @@ -//! Database debugging tool - -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - utils::DbTool, -}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION}; +use reth_db_common::DbTool; use std::io::{self, Write}; mod checksum; @@ -71,6 +67,16 @@ impl Command { let db_path = data_dir.db(); let static_files_path = data_dir.static_files(); + // ensure the provided datadir exist + eyre::ensure!( + data_dir.data_dir().is_dir(), + "Datadir does not exist: {:?}", + data_dir.data_dir() + ); + + // ensure the provided database exist + eyre::ensure!(db_path.is_dir(), "Database does not exist: {:?}", db_path); + match self.command { // TODO: We'll need to add this on the DB trait. 
Subcommands::Stats(command) => { diff --git a/bin/reth/src/commands/db/stats.rs b/crates/cli/commands/src/db/stats.rs similarity index 98% rename from bin/reth/src/commands/db/stats.rs rename to crates/cli/commands/src/db/stats.rs index 517b9c9e591f..37f7d617ba47 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -1,4 +1,4 @@ -use crate::{commands::db::checksum::ChecksumViewer, utils::DbTool}; +use crate::db::checksum::ChecksumViewer; use clap::Parser; use comfy_table::{Cell, Row, Table as ComfyTable}; use eyre::WrapErr; @@ -6,10 +6,11 @@ use human_bytes::human_bytes; use itertools::Itertools; use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Tables}; use reth_db_api::database::Database; +use reth_db_common::DbTool; use reth_fs_util as fs; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_primitives::static_file::{find_fixed_range, SegmentRangeInclusive}; use reth_provider::providers::StaticFileProvider; +use reth_static_file_types::{find_fixed_range, SegmentRangeInclusive}; use std::{sync::Arc, time::Duration}; #[derive(Parser, Debug)] diff --git a/bin/reth/src/commands/db/tui.rs b/crates/cli/commands/src/db/tui.rs similarity index 100% rename from bin/reth/src/commands/db/tui.rs rename to crates/cli/commands/src/db/tui.rs diff --git a/bin/reth/src/commands/dump_genesis.rs b/crates/cli/commands/src/dump_genesis.rs similarity index 84% rename from bin/reth/src/commands/dump_genesis.rs rename to crates/cli/commands/src/dump_genesis.rs index 843d3d18a64b..ae425ca8c29d 100644 --- a/bin/reth/src/commands/dump_genesis.rs +++ b/crates/cli/commands/src/dump_genesis.rs @@ -1,7 +1,7 @@ //! 
Command that dumps genesis block JSON configuration to stdout -use crate::args::utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}; use clap::Parser; -use reth_primitives::ChainSpec; +use reth_chainspec::ChainSpec; +use reth_node_core::args::utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}; use std::sync::Arc; /// Dumps genesis block JSON configuration to stdout @@ -15,7 +15,7 @@ pub struct DumpGenesisCommand { value_name = "CHAIN_OR_PATH", long_help = chain_help(), default_value = SUPPORTED_CHAINS[0], - value_parser = genesis_value_parser + value_parser = chain_value_parser )] chain: Arc, } @@ -39,7 +39,7 @@ mod tests { DumpGenesisCommand::parse_from(["reth", "--chain", chain]); assert_eq!( Ok(args.chain.chain), - chain.parse::(), + chain.parse::(), "failed to parse chain {chain}" ); } diff --git a/bin/reth/src/commands/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs similarity index 91% rename from bin/reth/src/commands/init_cmd.rs rename to crates/cli/commands/src/init_cmd.rs index 22657f0c0255..933527cc565a 100644 --- a/bin/reth/src/commands/init_cmd.rs +++ b/crates/cli/commands/src/init_cmd.rs @@ -1,6 +1,6 @@ //! Command that initializes the node from a genesis file. -use crate::commands::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_provider::BlockHashReader; use tracing::info; diff --git a/bin/reth/src/commands/init_state.rs b/crates/cli/commands/src/init_state.rs similarity index 96% rename from bin/reth/src/commands/init_state.rs rename to crates/cli/commands/src/init_state.rs index dbf45e5816a6..af26d15e0176 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/crates/cli/commands/src/init_state.rs @@ -1,6 +1,6 @@ //! Command that initializes the node from a genesis file. 
-use crate::commands::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_config::config::EtlConfig; use reth_db_api::database::Database; diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs new file mode 100644 index 000000000000..16767544e7ca --- /dev/null +++ b/crates/cli/commands/src/lib.rs @@ -0,0 +1,22 @@ +//! Commonly used reth CLI commands. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod common; +pub mod config_cmd; +pub mod db; +pub mod dump_genesis; +pub mod init_cmd; +pub mod init_state; +pub mod p2p; +pub mod prune; +pub mod recover; +pub mod stage; +#[cfg(feature = "dev")] +pub mod test_vectors; diff --git a/bin/reth/src/commands/p2p/mod.rs b/crates/cli/commands/src/p2p.rs similarity index 60% rename from bin/reth/src/commands/p2p/mod.rs rename to crates/cli/commands/src/p2p.rs index b57a2f07aaba..0fdefac8bd88 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/crates/cli/commands/src/p2p.rs @@ -1,28 +1,21 @@ //! 
P2P Debugging tool -use crate::{ - args::{ - get_secret_key, - utils::{chain_help, chain_spec_value_parser, hash_or_num_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, DiscoveryArgs, NetworkArgs, - }, - utils::get_single_header, -}; use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; -use discv5::ListenConfig; +use reth_chainspec::ChainSpec; +use reth_cli_util::{get_secret_key, hash_or_num_value_parser}; use reth_config::Config; -use reth_db::create_db; use reth_network::NetworkConfigBuilder; use reth_network_p2p::bodies::client::BodiesClient; -use reth_node_core::args::DatadirArgs; -use reth_primitives::{BlockHashOrNumber, ChainSpec}; -use reth_provider::{providers::StaticFileProvider, ProviderFactory}; -use std::{ - net::{IpAddr, SocketAddrV4, SocketAddrV6}, - path::PathBuf, - sync::Arc, +use reth_node_core::{ + args::{ + utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, DatadirArgs, NetworkArgs, + }, + utils::get_single_header, }; +use reth_primitives::BlockHashOrNumber; +use std::{path::PathBuf, sync::Arc}; /// `reth p2p` command #[derive(Debug, Parser)] @@ -39,7 +32,7 @@ pub struct Command { value_name = "CHAIN_OR_PATH", long_help = chain_help(), default_value = SUPPORTED_CHAINS[0], - value_parser = chain_spec_value_parser + value_parser = chain_value_parser )] chain: Arc, @@ -79,18 +72,12 @@ pub enum Subcommands { impl Command { /// Execute `p2p` command pub async fn execute(&self) -> eyre::Result<()> { - let tempdir = tempfile::TempDir::new()?; - let noop_db = Arc::new(create_db(tempdir.into_path(), self.db.database_args())?); - - // add network name to data dir let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = confy::load_path(&config_path).unwrap_or_default(); - for peer in &self.network.trusted_peers { - config.peers.trusted_nodes.insert(peer.resolve().await?); - } + 
config.peers.trusted_nodes.extend(self.network.resolve_trusted_peers().await?); if config.peers.trusted_nodes.is_empty() && self.network.trusted_only { eyre::bail!("No trusted nodes. Set trusted peer with `--trusted-peer ` or set `--trusted-only` to `false`") @@ -105,62 +92,20 @@ impl Command { let rlpx_socket = (self.network.addr, self.network.port).into(); let boot_nodes = self.chain.bootnodes().unwrap_or_default(); - let network = NetworkConfigBuilder::new(p2p_secret_key) + let net = NetworkConfigBuilder::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) .chain_spec(self.chain.clone()) .disable_discv4_discovery_if(self.chain.chain.is_optimism()) .boot_nodes(boot_nodes.clone()) .apply(|builder| { - self.network - .discovery - .apply_to_builder(builder, rlpx_socket) - .map_discv5_config_builder(|builder| { - let DiscoveryArgs { - discv5_addr, - discv5_addr_ipv6, - discv5_port, - discv5_port_ipv6, - discv5_lookup_interval, - discv5_bootstrap_lookup_interval, - discv5_bootstrap_lookup_countdown, - .. 
- } = self.network.discovery; - - // Use rlpx address if none given - let discv5_addr_ipv4 = discv5_addr.or(match self.network.addr { - IpAddr::V4(ip) => Some(ip), - IpAddr::V6(_) => None, - }); - let discv5_addr_ipv6 = discv5_addr_ipv6.or(match self.network.addr { - IpAddr::V4(_) => None, - IpAddr::V6(ip) => Some(ip), - }); - - builder - .discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( - discv5_addr_ipv4 - .map(|addr| SocketAddrV4::new(addr, discv5_port)), - discv5_addr_ipv6.map(|addr| { - SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0) - }), - )) - .build(), - ) - .add_unsigned_boot_nodes(boot_nodes.into_iter()) - .lookup_interval(discv5_lookup_interval) - .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) - .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) - }) + self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) }) - .build(Arc::new(ProviderFactory::new( - noop_db, - self.chain.clone(), - StaticFileProvider::read_write(data_dir.static_files())?, - ))) - .start_network() + .build_with_noop_provider() + .manager() .await?; + let network = net.handle().clone(); + tokio::task::spawn(net); let fetch_client = network.fetch_client().await?; let retries = self.retries.max(1); diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs new file mode 100644 index 000000000000..6cc5e033bc04 --- /dev/null +++ b/crates/cli/commands/src/prune.rs @@ -0,0 +1,42 @@ +//! Command that runs pruning without any limits. 
+use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use clap::Parser; +use reth_prune::PrunerBuilder; +use reth_static_file::StaticFileProducer; +use tracing::info; + +/// Prunes according to the configuration without any limits +#[derive(Debug, Parser)] +pub struct PruneCommand { + #[command(flatten)] + env: EnvironmentArgs, +} + +impl PruneCommand { + /// Execute the `prune` command + pub async fn execute(self) -> eyre::Result<()> { + let Environment { config, provider_factory, .. } = self.env.init(AccessRights::RW)?; + let prune_config = config.prune.unwrap_or_default(); + + // Copy data from database to static files + info!(target: "reth::cli", "Copying data from database to static files..."); + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone()); + let lowest_static_file_height = static_file_producer.lock().copy_to_static_files()?.min(); + info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files"); + + // Delete data which has been copied to static files. 
+ if let Some(prune_tip) = lowest_static_file_height { + info!(target: "reth::cli", ?prune_tip, ?prune_config, "Pruning data from database..."); + // Run the pruner according to the configuration, and don't enforce any limits on it + let mut pruner = PrunerBuilder::new(prune_config) + .prune_delete_limit(usize::MAX) + .build(provider_factory); + + pruner.run(prune_tip)?; + info!(target: "reth::cli", "Pruned data from database"); + } + + Ok(()) + } +} diff --git a/bin/reth/src/commands/recover/mod.rs b/crates/cli/commands/src/recover/mod.rs similarity index 100% rename from bin/reth/src/commands/recover/mod.rs rename to crates/cli/commands/src/recover/mod.rs diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs similarity index 96% rename from bin/reth/src/commands/recover/storage_tries.rs rename to crates/cli/commands/src/recover/storage_tries.rs index b1dbbfa88ce5..2b4087144805 100644 --- a/bin/reth/src/commands/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -1,4 +1,4 @@ -use crate::commands::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_cli_runner::CliContext; use reth_db::tables; diff --git a/bin/reth/src/commands/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs similarity index 96% rename from bin/reth/src/commands/stage/drop.rs rename to crates/cli/commands/src/stage/drop.rs index 8297eafef81a..8278185df09a 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -1,18 +1,17 @@ //! 
Database debugging tool - -use crate::{ - args::StageEnum, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - utils::DbTool, -}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; use reth_db::{static_file::iter_static_files, tables, DatabaseEnv}; use reth_db_api::transaction::DbTxMut; -use reth_db_common::init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}; -use reth_primitives::{static_file::find_fixed_range, StaticFileSegment}; +use reth_db_common::{ + init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}, + DbTool, +}; +use reth_node_core::args::StageEnum; use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use reth_stages::StageId; +use reth_static_file_types::{find_fixed_range, StaticFileSegment}; /// `reth drop-stage` command #[derive(Debug, Parser)] diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs similarity index 88% rename from bin/reth/src/commands/stage/dump/execution.rs rename to crates/cli/commands/src/stage/dump/execution.rs index b6d6721dcf8d..61fc5e41ceff 100644 --- a/bin/reth/src/commands/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -1,26 +1,32 @@ use super::setup; -use crate::{macros::block_executor, utils::DbTool}; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, database::Database, table::TableImporter, transaction::DbTx, }; +use reth_db_common::DbTool; +use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, ChainSpecProvider, ProviderFactory}; +use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_execution_stage( 
+pub(crate) async fn dump_execution_stage( db_tool: &DbTool, from: u64, to: u64, output_datadir: ChainPath, should_run: bool, -) -> eyre::Result<()> { + executor: E, +) -> eyre::Result<()> +where + DB: Database, + E: BlockExecutorProvider, +{ let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; import_tables_with_range(&output_db, db_tool, from, to)?; - unwind_and_copy(db_tool, from, tip_block_number, &output_db).await?; + unwind_and_copy(db_tool, from, tip_block_number, &output_db)?; if should_run { dry_run( @@ -31,8 +37,8 @@ pub(crate) async fn dump_execution_stage( ), to, from, - ) - .await?; + executor, + )?; } Ok(()) @@ -119,7 +125,7 @@ fn import_tables_with_range( /// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. -async fn unwind_and_copy( +fn unwind_and_copy( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -127,8 +133,7 @@ async fn unwind_and_copy( ) -> eyre::Result<()> { let provider = db_tool.provider_factory.provider_rw()?; - let executor = block_executor!(db_tool.chain()); - let mut exec_stage = ExecutionStage::new_with_executor(executor); + let mut exec_stage = ExecutionStage::new_with_executor(NoopBlockExecutorProvider::default()); exec_stage.unwind( &provider, @@ -150,14 +155,18 @@ async fn unwind_and_copy( } /// Try to re-execute the stage without committing -async fn dry_run( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, -) -> eyre::Result<()> { + executor: E, +) -> eyre::Result<()> +where + DB: Database, + E: BlockExecutorProvider, +{ info!(target: "reth::cli", "Executing stage. 
[dry-run]"); - let executor = block_executor!(output_provider_factory.chain_spec()); let mut exec_stage = ExecutionStage::new_with_executor(executor); let input = diff --git a/bin/reth/src/commands/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs similarity index 96% rename from bin/reth/src/commands/stage/dump/hashing_account.rs rename to crates/cli/commands/src/stage/dump/hashing_account.rs index 116d4919733e..418426f5ec53 100644 --- a/bin/reth/src/commands/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -1,8 +1,8 @@ use super::setup; -use crate::utils::DbTool; use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; +use reth_db_common::DbTool; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_primitives::BlockNumber; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; @@ -38,8 +38,7 @@ pub(crate) async fn dump_hashing_account_stage( ), to, from, - ) - .await?; + )?; } Ok(()) @@ -71,7 +70,7 @@ fn unwind_and_copy( } /// Try to re-execute the stage straight away -async fn dry_run( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/bin/reth/src/commands/stage/dump/hashing_storage.rs b/crates/cli/commands/src/stage/dump/hashing_storage.rs similarity index 96% rename from bin/reth/src/commands/stage/dump/hashing_storage.rs rename to crates/cli/commands/src/stage/dump/hashing_storage.rs index d5671aae024d..357fe596ffa9 100644 --- a/bin/reth/src/commands/stage/dump/hashing_storage.rs +++ b/crates/cli/commands/src/stage/dump/hashing_storage.rs @@ -1,8 +1,8 @@ use super::setup; -use crate::utils::DbTool; use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; +use reth_db_common::DbTool; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use 
reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput}; @@ -28,8 +28,7 @@ pub(crate) async fn dump_hashing_storage_stage( ), to, from, - ) - .await?; + )?; } Ok(()) @@ -66,7 +65,7 @@ fn unwind_and_copy( } /// Try to re-execute the stage straight away -async fn dry_run( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs similarity index 90% rename from bin/reth/src/commands/stage/dump/merkle.rs rename to crates/cli/commands/src/stage/dump/merkle.rs index 451133dc2085..5c2641cc1997 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -1,20 +1,21 @@ use super::setup; -use crate::{macros::block_executor, utils::DbTool}; use eyre::Result; use reth_config::config::EtlConfig; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; +use reth_db_common::DbTool; +use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_primitives::BlockNumber; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::{ stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage, - StorageHashingStage, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, + AccountHashingStage, ExecutionStage, MerkleStage, StorageHashingStage, + MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, }, - Stage, StageCheckpoint, UnwindInput, + ExecutionStageThresholds, Stage, StageCheckpoint, UnwindInput, }; use tracing::info; @@ -43,7 +44,7 @@ pub(crate) async fn dump_merkle_stage( ) })??; - unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db).await?; + unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db)?; if should_run { dry_run( @@ -54,15 +55,14 @@ pub(crate) async 
fn dump_merkle_stage( ), to, from, - ) - .await?; + )?; } Ok(()) } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. -async fn unwind_and_copy( +fn unwind_and_copy( db_tool: &DbTool, range: (u64, u64), tip_block_number: u64, @@ -86,11 +86,9 @@ async fn unwind_and_copy( MerkleStage::default_unwind().unwind(&provider, unwind)?; - let executor = block_executor!(db_tool.chain()); - // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - executor, + NoopBlockExecutorProvider::default(), // Not necessary for unwinding. ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, @@ -142,7 +140,7 @@ async fn unwind_and_copy( } /// Try to re-execute the stage straight away -async fn dry_run( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/bin/reth/src/commands/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs similarity index 77% rename from bin/reth/src/commands/stage/dump/mod.rs rename to crates/cli/commands/src/stage/dump/mod.rs index 287708b00d68..7366ff9981e0 100644 --- a/bin/reth/src/commands/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -1,20 +1,19 @@ //! 
Database debugging tool - -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - dirs::DataDirPath, - utils::DbTool, -}; - -use crate::args::DatadirArgs; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; +use reth_chainspec::ChainSpec; use reth_db::{init_db, mdbx::DatabaseArguments, tables, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, database::Database, models::ClientVersion, table::TableImporter, transaction::DbTx, }; -use reth_node_core::dirs::PlatformPath; -use std::path::PathBuf; +use reth_db_common::DbTool; +use reth_evm::execute::BlockExecutorProvider; +use reth_node_core::{ + args::DatadirArgs, + dirs::{DataDirPath, PlatformPath}, +}; +use std::{path::PathBuf, sync::Arc}; use tracing::info; mod hashing_storage; @@ -77,16 +76,29 @@ macro_rules! handle_stage { let output_datadir = output_datadir.with_chain($tool.chain().chain, DatadirArgs::default()); $stage_fn($tool, *from, *to, output_datadir, *dry_run).await? }}; + + ($stage_fn:ident, $tool:expr, $command:expr, $executor:expr) => {{ + let StageCommand { output_datadir, from, to, dry_run, .. } = $command; + let output_datadir = output_datadir.with_chain($tool.chain().chain, DatadirArgs::default()); + $stage_fn($tool, *from, *to, output_datadir, *dry_run, $executor).await? + }}; } impl Command { /// Execute `dump-stage` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute(self, executor: F) -> eyre::Result<()> + where + E: BlockExecutorProvider, + F: FnOnce(Arc) -> E, + { let Environment { provider_factory, .. 
} = self.env.init(AccessRights::RO)?; let tool = DbTool::new(provider_factory)?; match &self.command { - Stages::Execution(cmd) => handle_stage!(dump_execution_stage, &tool, cmd), + Stages::Execution(cmd) => { + let executor = executor(tool.chain()); + handle_stage!(dump_execution_stage, &tool, cmd, executor) + } Stages::StorageHashing(cmd) => handle_stage!(dump_hashing_storage_stage, &tool, cmd), Stages::AccountHashing(cmd) => handle_stage!(dump_hashing_account_stage, &tool, cmd), Stages::Merkle(cmd) => handle_stage!(dump_merkle_stage, &tool, cmd), diff --git a/bin/reth/src/commands/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs similarity index 73% rename from bin/reth/src/commands/stage/mod.rs rename to crates/cli/commands/src/stage/mod.rs index 8f514295e25c..e0365c879d7e 100644 --- a/bin/reth/src/commands/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -1,7 +1,11 @@ //! `reth stage` command +use std::sync::Arc; + use clap::{Parser, Subcommand}; +use reth_chainspec::ChainSpec; use reth_cli_runner::CliContext; +use reth_evm::execute::BlockExecutorProvider; pub mod drop; pub mod dump; @@ -35,11 +39,15 @@ pub enum Subcommands { impl Command { /// Execute `stage` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> + where + E: BlockExecutorProvider, + F: FnOnce(Arc) -> E, + { match self.command { - Subcommands::Run(command) => command.execute(ctx).await, + Subcommands::Run(command) => command.execute(ctx, executor).await, Subcommands::Drop(command) => command.execute().await, - Subcommands::Dump(command) => command.execute().await, + Subcommands::Dump(command) => command.execute(executor).await, Subcommands::Unwind(command) => command.execute().await, } } diff --git a/bin/reth/src/commands/stage/run.rs b/crates/cli/commands/src/stage/run.rs similarity index 82% rename from bin/reth/src/commands/stage/run.rs rename to 
crates/cli/commands/src/stage/run.rs index 050895e1130c..9b6416cf4fe0 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -1,29 +1,31 @@ //! Main `stage` command //! //! Stage debugging tool - -use crate::{ - args::{get_secret_key, NetworkArgs, StageEnum}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, - prometheus_exporter, -}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; +use reth_chainspec::ChainSpec; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig}; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; +use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; +use reth_node_core::{ + args::{NetworkArgs, StageEnum}, + prometheus_exporter, +}; use reth_provider::{ ChainSpecProvider, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, + StaticFileWriter, }; use reth_stages::{ stages::{ - AccountHashingStage, BodyStage, ExecutionStage, ExecutionStageThresholds, - IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, - StorageHashingStage, TransactionLookupStage, + AccountHashingStage, BodyStage, ExecutionStage, IndexAccountHistoryStage, + IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, + TransactionLookupStage, }, - ExecInput, ExecOutput, Stage, StageExt, UnwindInput, UnwindOutput, + ExecInput, ExecOutput, ExecutionStageThresholds, Stage, StageExt, UnwindInput, UnwindOutput, }; use std::{any::Any, net::SocketAddr, sync::Arc, time::Instant}; use tracing::*; @@ -82,7 +84,11 @@ pub struct Command { impl Command { /// Execute `stage` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + pub async fn execute(self, ctx: CliContext, executor: F) -> 
eyre::Result<()> + where + E: BlockExecutorProvider, + F: FnOnce(Arc) -> E, + { // Raise the fd limit of the process. // Does not do anything on windows. let _ = fdlimit::raise_fd_limit(); @@ -117,12 +123,7 @@ impl Command { let mut config = config; config.peers.trusted_nodes_only = self.network.trusted_only; - if !self.network.trusted_peers.is_empty() { - for peer in &self.network.trusted_peers { - let peer = peer.resolve().await?; - config.peers.trusted_nodes.insert(peer); - } - } + config.peers.trusted_nodes.extend(self.network.resolve_trusted_peers().await?); let network_secret_path = self .network @@ -167,24 +168,21 @@ impl Command { })), None, ), - StageEnum::Execution => { - let executor = block_executor!(provider_factory.chain_spec()); - ( - Box::new(ExecutionStage::new( - executor, - ExecutionStageThresholds { - max_blocks: Some(batch_size), - max_changes: None, - max_cumulative_gas: None, - max_duration: None, - }, - config.stages.merkle.clean_threshold, - prune_modes, - ExExManagerHandle::empty(), - )), - None, - ) - } + StageEnum::Execution => ( + Box::new(ExecutionStage::new( + executor(provider_factory.chain_spec()), + ExecutionStageThresholds { + max_blocks: Some(batch_size), + max_changes: None, + max_cumulative_gas: None, + max_duration: None, + }, + config.stages.merkle.clean_threshold, + prune_modes, + ExExManagerHandle::empty(), + )), + None, + ), StageEnum::TxLookup => ( Box::new(TransactionLookupStage::new( TransactionLookupConfig { chunk_size: batch_size }, @@ -253,7 +251,12 @@ impl Command { } if self.commit { + // For unwinding it makes more sense to commit the database first, since if + // this function is interrupted before the static files commit, we can just + // truncate the static files according to the + // checkpoints on the next start-up. 
provider_rw.commit()?; + provider_factory.static_file_provider().commit()?; provider_rw = provider_factory.provider_rw()?; } } @@ -276,6 +279,7 @@ impl Command { provider_rw.save_stage_checkpoint(exec_stage.id(), checkpoint)?; } if self.commit { + provider_factory.static_file_provider().commit()?; provider_rw.commit()?; provider_rw = provider_factory.provider_rw()?; } diff --git a/bin/reth/src/commands/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs similarity index 72% rename from bin/reth/src/commands/stage/unwind.rs rename to crates/cli/commands/src/stage/unwind.rs index 89131e5aac11..7659fdfc1501 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -1,34 +1,31 @@ //! Unwinding a certain block range +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; use reth_config::Config; use reth_consensus::Consensus; use reth_db_api::database::Database; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; +use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_node_core::args::NetworkArgs; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, FinalizedBlockReader, - FinalizedBlockWriter, HeaderSyncMode, ProviderFactory, StaticFileProviderFactory, + FinalizedBlockWriter, ProviderFactory, StaticFileProviderFactory, }; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::{ - sets::DefaultStages, - stages::{ExecutionStage, ExecutionStageThresholds}, - Pipeline, StageSet, + sets::{DefaultStages, OfflineStages}, + stages::ExecutionStage, + ExecutionStageThresholds, Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::watch; use tracing::info; -use crate::{ - 
commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, -}; - /// `reth stage unwind` command #[derive(Debug, Parser)] pub struct Command { @@ -40,6 +37,11 @@ pub struct Command { #[command(subcommand)] command: Subcommands, + + /// If this is enabled, then all stages except headers, bodies, and sender recovery will be + /// unwound. + #[arg(long)] + offline: bool, } impl Command { @@ -52,17 +54,31 @@ impl Command { eyre::bail!("Cannot unwind genesis block") } - // Only execute a pipeline unwind if the start of the range overlaps the existing static - // files. If that's the case, then copy all available data from MDBX to static files, and - // only then, proceed with the unwind. - if let Some(highest_static_block) = provider_factory + let highest_static_file_block = provider_factory .static_file_provider() .get_highest_static_files() .max() - .filter(|highest_static_file_block| highest_static_file_block >= range.start()) - { - info!(target: "reth::cli", ?range, ?highest_static_block, "Executing a pipeline unwind."); - let mut pipeline = self.build_pipeline(config, provider_factory.clone()).await?; + .filter(|highest_static_file_block| highest_static_file_block >= range.start()); + + // Execute a pipeline unwind if the start of the range overlaps the existing static + // files. If that's the case, then copy all available data from MDBX to static files, and + // only then, proceed with the unwind. + // + // We also execute a pipeline unwind if `offline` is specified, because we need to only + // unwind the data associated with offline stages. 
+ if highest_static_file_block.is_some() || self.offline { + if self.offline { + info!(target: "reth::cli", "Performing an unwind for offline-only data!"); + } + + if let Some(highest_static_file_block) = highest_static_file_block { + info!(target: "reth::cli", ?range, ?highest_static_file_block, "Executing a pipeline unwind."); + } else { + info!(target: "reth::cli", ?range, "Executing a pipeline unwind."); + } + + // This will build an offline-only pipeline if the `offline` flag is enabled + let mut pipeline = self.build_pipeline(config, provider_factory)?; // Move all applicable data from database to static files. pipeline.move_to_static_files()?; @@ -87,12 +103,12 @@ impl Command { provider.commit()?; } - println!("Unwound {} blocks", range.count()); + info!(target: "reth::cli", range=?range.clone(), count=range.count(), "Unwound blocks"); Ok(()) } - async fn build_pipeline( + fn build_pipeline( self, config: Config, provider_factory: ProviderFactory>, @@ -103,15 +119,21 @@ impl Command { let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let executor = block_executor!(provider_factory.chain_spec()); - let header_mode = HeaderSyncMode::Tip(tip_rx); - let pipeline = Pipeline::builder() - .with_tip_sender(tip_tx) - .add_stages( + // Unwinding does not require a valid executor + let executor = NoopBlockExecutorProvider::default(); + + let builder = if self.offline { + Pipeline::builder().add_stages( + OfflineStages::new(executor, config.stages, PruneModes::default()) + .builder() + .disable(reth_stages::StageId::SenderRecovery), + ) + } else { + Pipeline::builder().with_tip_sender(tip_tx).add_stages( DefaultStages::new( provider_factory.clone(), - header_mode, + tip_rx, Arc::clone(&consensus), NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), @@ -132,10 +154,12 @@ impl Command { ExExManagerHandle::empty(), )), ) - .build( - provider_factory.clone(), - 
StaticFileProducer::new(provider_factory, PruneModes::default()), - ); + }; + + let pipeline = builder.build( + provider_factory.clone(), + StaticFileProducer::new(provider_factory, PruneModes::default()), + ); Ok(pipeline) } } diff --git a/bin/reth/src/commands/test_vectors/mod.rs b/crates/cli/commands/src/test_vectors/mod.rs similarity index 100% rename from bin/reth/src/commands/test_vectors/mod.rs rename to crates/cli/commands/src/test_vectors/mod.rs diff --git a/bin/reth/src/commands/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs similarity index 83% rename from bin/reth/src/commands/test_vectors/tables.rs rename to crates/cli/commands/src/test_vectors/tables.rs index 0cd2cf04d7e9..240ecd71f84a 100644 --- a/bin/reth/src/commands/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -1,15 +1,15 @@ -use std::collections::HashSet; - +use arbitrary::Arbitrary; use eyre::Result; use proptest::{ - arbitrary::Arbitrary, - prelude::{any_with, ProptestConfig}, + prelude::ProptestConfig, strategy::{Strategy, ValueTree}, test_runner::TestRunner, }; +use proptest_arbitrary_interop::arb; use reth_db::tables; use reth_db_api::table::{DupSort, Table, TableRow}; use reth_fs_util as fs; +use std::collections::HashSet; use tracing::error; const VECTORS_FOLDER: &str = "testdata/micro/db"; @@ -73,21 +73,14 @@ pub(crate) fn generate_vectors(mut tables: Vec) -> Result<()> { /// Generates test-vectors for normal tables. Keys are sorted and not repeated. 
fn generate_table_vector(runner: &mut TestRunner, per_table: usize) -> Result<()> where - T::Key: Arbitrary + serde::Serialize + Ord + std::hash::Hash, - T::Value: Arbitrary + serde::Serialize, T: Table, + T::Key: for<'a> Arbitrary<'a> + serde::Serialize + Ord + std::hash::Hash + Clone, + T::Value: for<'a> Arbitrary<'a> + serde::Serialize + Clone, { let mut rows = vec![]; let mut seen_keys = HashSet::new(); - let strategy = proptest::collection::vec( - any_with::>(( - ::Parameters::default(), - ::Parameters::default(), - )), - per_table - rows.len(), - ) - .no_shrink() - .boxed(); + let strategy = + proptest::collection::vec(arb::>(), per_table - rows.len()).no_shrink().boxed(); while rows.len() < per_table { // Generate all `per_table` rows: (Key, Value) @@ -111,23 +104,17 @@ where fn generate_dupsort_vector(runner: &mut TestRunner, per_table: usize) -> Result<()> where T: Table + DupSort, - T::Key: Arbitrary + serde::Serialize + Ord + std::hash::Hash, - T::Value: Arbitrary + serde::Serialize + Ord, + T::Key: for<'a> Arbitrary<'a> + serde::Serialize + Ord + std::hash::Hash + Clone, + T::Value: for<'a> Arbitrary<'a> + serde::Serialize + Ord + Clone, { let mut rows = vec![]; // We want to control our repeated keys let mut seen_keys = HashSet::new(); - let strat_values = proptest::collection::vec( - any_with::(::Parameters::default()), - 100..300, - ) - .no_shrink() - .boxed(); + let strat_values = proptest::collection::vec(arb::(), 100..300).no_shrink().boxed(); - let strat_keys = - any_with::(::Parameters::default()).no_shrink().boxed(); + let strat_keys = arb::().no_shrink().boxed(); while rows.len() < per_table { let key: T::Key = strat_keys.new_tree(runner).map_err(|e| eyre::eyre!("{e}"))?.current(); diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 9d5fea6a1c5a..a848ad0b21d2 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -11,7 +11,7 @@ //! Entrypoint for running commands. 
use reth_tasks::{TaskExecutor, TaskManager}; -use std::{future::Future, pin::pin}; +use std::{future::Future, pin::pin, sync::mpsc, time::Duration}; use tracing::{debug, error, trace}; /// Executes CLI commands. @@ -52,17 +52,26 @@ impl CliRunner { // after the command has finished or exit signal was received we shutdown the task // manager which fires the shutdown signal to all tasks spawned via the task // executor and awaiting on tasks spawned with graceful shutdown - task_manager.graceful_shutdown_with_timeout(std::time::Duration::from_secs(10)); + task_manager.graceful_shutdown_with_timeout(Duration::from_secs(5)); } - // drop the tokio runtime on a separate thread because drop blocks until its pools - // (including blocking pool) are shutdown. In other words `drop(tokio_runtime)` would block - // the current thread but we want to exit right away. + // `drop(tokio_runtime)` would block the current thread until its pools + // (including blocking pool) are shutdown. Since we want to exit as soon as possible, drop + // it on a separate thread and wait for up to 5 seconds for this operation to + // complete. 
+ let (tx, rx) = mpsc::channel(); std::thread::Builder::new() .name("tokio-runtime-shutdown".to_string()) - .spawn(move || drop(tokio_runtime)) + .spawn(move || { + drop(tokio_runtime); + let _ = tx.send(()); + }) .unwrap(); + let _ = rx.recv_timeout(Duration::from_secs(5)).inspect_err(|err| { + debug!(target: "reth::cli", %err, "tokio runtime shutdown timed out"); + }); + command_res } diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml new file mode 100644 index 000000000000..f38421bc0954 --- /dev/null +++ b/crates/cli/util/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "reth-cli-util" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[dependencies] +reth-fs-util.workspace = true +reth-network.workspace = true +reth-primitives.workspace = true +secp256k1.workspace = true +thiserror.workspace = true +eyre.workspace = true + +[dev-dependencies] +proptest.workspace = true + +[lints] +workspace = true diff --git a/crates/cli/util/src/lib.rs b/crates/cli/util/src/lib.rs new file mode 100644 index 000000000000..39d7b7f98a51 --- /dev/null +++ b/crates/cli/util/src/lib.rs @@ -0,0 +1,17 @@ +//! This crate defines a set of commonly used cli utils. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// Helper function to load a secret key from a file. +pub mod load_secret_key; +pub use load_secret_key::get_secret_key; + +/// Cli parsers functions. 
+pub mod parsers; +pub use parsers::{hash_or_num_value_parser, parse_duration_from_secs, parse_socket_address}; diff --git a/crates/node-core/src/args/secret_key.rs b/crates/cli/util/src/load_secret_key.rs similarity index 100% rename from crates/node-core/src/args/secret_key.rs rename to crates/cli/util/src/load_secret_key.rs diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs new file mode 100644 index 000000000000..5e7c8c785373 --- /dev/null +++ b/crates/cli/util/src/parsers.rs @@ -0,0 +1,96 @@ +use reth_primitives::{BlockHashOrNumber, B256}; +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs}, + str::FromStr, + time::Duration, +}; + +/// Helper to parse a [Duration] from seconds +pub fn parse_duration_from_secs(arg: &str) -> eyre::Result { + let seconds = arg.parse()?; + Ok(Duration::from_secs(seconds)) +} + +/// Parse [`BlockHashOrNumber`] +pub fn hash_or_num_value_parser(value: &str) -> eyre::Result { + match B256::from_str(value) { + Ok(hash) => Ok(BlockHashOrNumber::Hash(hash)), + Err(_) => Ok(BlockHashOrNumber::Number(value.parse()?)), + } +} + +/// Error thrown while parsing a socket address. +#[derive(thiserror::Error, Debug)] +pub enum SocketAddressParsingError { + /// Failed to convert the string into a socket addr + #[error("could not parse socket address: {0}")] + Io(#[from] std::io::Error), + /// Input must not be empty + #[error("cannot parse socket address from empty string")] + Empty, + /// Failed to parse the address + #[error("could not parse socket address from {0}")] + Parse(String), + /// Failed to parse port + #[error("could not parse port: {0}")] + Port(#[from] std::num::ParseIntError), +} + +/// Parse a [`SocketAddr`] from a `str`. +/// +/// The following formats are checked: +/// +/// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the +/// hostname is set to `localhost`. 
+/// - If the value contains `:` it is assumed to be the format `:` +/// - Otherwise it is assumed to be a hostname +/// +/// An error is returned if the value is empty. +pub fn parse_socket_address(value: &str) -> eyre::Result { + if value.is_empty() { + return Err(SocketAddressParsingError::Empty) + } + + if let Some(port) = value.strip_prefix(':').or_else(|| value.strip_prefix("localhost:")) { + let port: u16 = port.parse()?; + return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)) + } + if let Ok(port) = value.parse::() { + return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)) + } + value + .to_socket_addrs()? + .next() + .ok_or_else(|| SocketAddressParsingError::Parse(value.to_string())) +} + +#[cfg(test)] +mod tests { + use super::*; + use proptest::prelude::Rng; + use secp256k1::rand::thread_rng; + + #[test] + fn parse_socket_addresses() { + for value in ["localhost:9000", ":9000", "9000"] { + let socket_addr = parse_socket_address(value) + .unwrap_or_else(|_| panic!("could not parse socket address: {value}")); + + assert!(socket_addr.ip().is_loopback()); + assert_eq!(socket_addr.port(), 9000); + } + } + + #[test] + fn parse_socket_address_random() { + let port: u16 = thread_rng().gen(); + + for value in [format!("localhost:{port}"), format!(":{port}"), port.to_string()] { + let socket_addr = parse_socket_address(&value) + .unwrap_or_else(|_| panic!("could not parse socket address: {value}")); + + assert!(socket_addr.ip().is_loopback()); + assert_eq!(socket_addr.port(), port); + } + } +} diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index 527f5b1538ed..91dcfc772e9c 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -12,8 +12,9 @@ workspace = true [dependencies] # reth -reth-network.workspace = true +reth-network-types = { workspace = true, features = ["serde"] } reth-prune-types.workspace = true +reth-stages-types.workspace = true # serde serde.workspace = true diff --git 
a/crates/config/src/config.rs b/crates/config/src/config.rs index 474576d416a0..2d7c6b628c46 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -1,7 +1,8 @@ //! Configuration files. -use reth_network::{PeersConfig, SessionsConfig}; +use reth_network_types::{PeersConfig, SessionsConfig}; use reth_prune_types::PruneModes; +use reth_stages_types::ExecutionStageThresholds; use serde::{Deserialize, Deserializer, Serialize}; use std::{ ffi::OsStr, @@ -53,7 +54,7 @@ impl Config { } /// Sets the pruning configuration. - pub fn update_prune_confing(&mut self, prune_config: PruneConfig) { + pub fn update_prune_config(&mut self, prune_config: PruneConfig) { self.prune = Some(prune_config); } } @@ -217,6 +218,17 @@ impl Default for ExecutionConfig { } } +impl From for ExecutionStageThresholds { + fn from(config: ExecutionConfig) -> Self { + Self { + max_blocks: config.max_blocks, + max_changes: config.max_changes, + max_cumulative_gas: config.max_cumulative_gas, + max_duration: config.max_duration, + } + } +} + /// Hashing stage configuration. #[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)] #[serde(default)] diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 8aa6e1656d94..edf9f84389dc 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -13,9 +13,11 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-beacon-consensus.workspace = true reth-primitives.workspace = true reth-execution-errors.workspace = true +reth-execution-types.workspace = true reth-network-p2p.workspace = true reth-provider.workspace = true reth-stages-api.workspace = true @@ -35,5 +37,4 @@ tokio-stream.workspace = true tracing.workspace = true [features] -# Included solely to ignore certain tests. 
-optimism = [] +optimism = ["reth-provider/optimism"] diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index 11d511ced068..2e233ef60b72 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -4,11 +4,11 @@ use crate::Storage; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, - headers::client::{HeadersClient, HeadersFut, HeadersRequest}, + headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, priority::Priority, }; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, B256}; use std::fmt::Debug; use tracing::{trace, warn}; diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index ada2936ff1b2..2fd93e7caab6 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -16,17 +16,17 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_beacon_consensus::BeaconEngineMessage; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_engine_primitives::EngineTypes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, - eip4844::calculate_excess_blob_gas, - proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, - ChainSpec, Header, Requests, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, B256, - U256, + constants::ETHEREUM_BLOCK_GAS_LIMIT, eip4844::calculate_excess_blob_gas, proofs, Block, + BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Bloom, Header, + Requests, SealedBlock, SealedHeader, 
TransactionSigned, Withdrawals, B256, U256, }; -use reth_provider::{BlockReaderIdExt, ExecutionOutcome, StateProviderFactory, StateRootProvider}; +use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::TransactionPool; use std::{ @@ -56,7 +56,7 @@ pub struct AutoSealConsensus { impl AutoSealConsensus { /// Create a new instance of [`AutoSealConsensus`] - pub fn new(chain_spec: Arc) -> Self { + pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } } @@ -263,7 +263,7 @@ impl StorageInner { ommers: &[Header], withdrawals: Option<&Withdrawals>, requests: Option<&Requests>, - chain_spec: Arc, + chain_spec: &ChainSpec, ) -> Header { // check previous block for base fee let base_fee_per_gas = self.headers.get(&self.best_block).and_then(|parent| { @@ -287,7 +287,7 @@ impl StorageInner { ommers_hash: proofs::calculate_ommers_root(ommers), beneficiary: Default::default(), state_root: Default::default(), - transactions_root: Default::default(), + transactions_root: proofs::calculate_transaction_root(transactions), receipts_root: Default::default(), withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), logs_bloom: Default::default(), @@ -327,12 +327,6 @@ impl StorageInner { Some(calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } - header.transactions_root = if transactions.is_empty() { - EMPTY_TRANSACTIONS - } else { - proofs::calculate_transaction_root(transactions) - }; - header } @@ -367,7 +361,7 @@ impl StorageInner { &ommers, withdrawals.as_ref(), requests.as_ref(), - chain_spec, + &chain_spec, ); let block = Block { @@ -387,8 +381,13 @@ impl StorageInner { ); // execute the block - let BlockExecutionOutput { state, receipts, requests: block_execution_requests, .. 
} = - executor.executor(&mut db).execute((&block, U256::ZERO).into())?; + let BlockExecutionOutput { + state, + receipts, + requests: block_execution_requests, + gas_used, + .. + } = executor.executor(&mut db).execute((&block, U256::ZERO).into())?; let execution_outcome = ExecutionOutcome::new( state, receipts.into(), @@ -405,8 +404,30 @@ impl StorageInner { trace!(target: "consensus::auto", ?execution_outcome, ?header, ?body, "executed block, calculating state root and completing header"); - // calculate the state root + // now we need to update certain header fields with the results of the execution header.state_root = db.state_root(execution_outcome.state())?; + header.gas_used = gas_used; + + let receipts = execution_outcome.receipts_by_block(header.number); + + // update logs bloom + let receipts_with_bloom = + receipts.iter().map(|r| r.as_ref().unwrap().bloom_slow()).collect::>(); + header.logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | *r); + + // update receipts root + header.receipts_root = { + #[cfg(feature = "optimism")] + let receipts_root = execution_outcome + .optimism_receipts_root_slow(header.number, &chain_spec, header.timestamp) + .expect("Receipts is present"); + + #[cfg(not(feature = "optimism"))] + let receipts_root = + execution_outcome.receipts_root_slow(header.number).expect("Receipts is present"); + + receipts_root + }; trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); // finally insert into storage diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 697143e760da..39491a65f2cb 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -1,9 +1,10 @@ use crate::{mode::MiningMode, Storage}; use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; +use reth_chainspec::ChainSpec; use reth_engine_primitives::EngineTypes; use 
reth_evm::execute::BlockExecutorProvider; -use reth_primitives::{ChainSpec, IntoRecoveredTransaction}; +use reth_primitives::IntoRecoveredTransaction; use reth_provider::{CanonChainTracker, StateProviderFactory}; use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 7693890cc762..bf74df0f7598 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-ethereum-consensus.workspace = true reth-blockchain-tree-api.workspace = true reth-primitives.workspace = true @@ -68,6 +69,7 @@ reth-config.workspace = true reth-testing-utils.workspace = true reth-exex-types.workspace = true reth-prune-types.workspace = true +alloy-genesis.workspace = true assert_matches.workspace = true @@ -76,6 +78,5 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "reth-blockchain-tree/optimism", - "reth-ethereum-consensus/optimism", "reth-rpc/optimism", ] diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index afd19f6079eb..ba09dff6c017 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -3,7 +3,7 @@ use reth_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum}; /// The struct that keeps track of the received forkchoice state and their status. #[derive(Debug, Clone, Default)] -pub(crate) struct ForkchoiceStateTracker { +pub struct ForkchoiceStateTracker { /// The latest forkchoice state that we received. /// /// Caution: this can be invalid. @@ -20,7 +20,7 @@ impl ForkchoiceStateTracker { /// /// If the status is `VALID`, we also update the last valid forkchoice state and set the /// `sync_target` to `None`, since we're now fully synced. 
- pub(crate) fn set_latest(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { + pub fn set_latest(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { if status.is_valid() { self.set_valid(state); } else if status.is_syncing() { @@ -76,7 +76,7 @@ impl ForkchoiceStateTracker { } /// Returns the last received `ForkchoiceState` to which we need to sync. - pub(crate) const fn sync_target_state(&self) -> Option { + pub const fn sync_target_state(&self) -> Option { self.last_syncing } @@ -139,9 +139,12 @@ impl From for ForkchoiceStatus { /// A helper type to check represent hashes of a [`ForkchoiceState`] #[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub(crate) enum ForkchoiceStateHash { +pub enum ForkchoiceStateHash { + /// Head hash of the [`ForkchoiceState`]. Head(B256), + /// Safe hash of the [`ForkchoiceState`]. Safe(B256), + /// Finalized hash of the [`ForkchoiceState`]. Finalized(B256), } diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 0f0d9e1da80b..0cffc67b3ff1 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -87,7 +87,7 @@ where /// Sends a transition configuration exchange message to the beacon consensus engine. /// /// See also - pub async fn transition_configuration_exchanged(&self) { + pub fn transition_configuration_exchanged(&self) { let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged); } diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index d98df18b2984..beff742b0233 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -14,7 +14,8 @@ use tracing::warn; const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. 
-pub(crate) struct InvalidHeaderCache { +#[derive(Debug)] +pub struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. headers: LruMap, /// Metrics for the cache. @@ -22,7 +23,8 @@ pub(crate) struct InvalidHeaderCache { } impl InvalidHeaderCache { - pub(crate) fn new(max_length: u32) -> Self { + /// Invalid header cache constructor. + pub fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } @@ -34,7 +36,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub(crate) fn get(&mut self, hash: &B256) -> Option> { + pub fn get(&mut self, hash: &B256) -> Option> { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -49,7 +51,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub(crate) fn insert_with_invalid_ancestor( + pub fn insert_with_invalid_ancestor( &mut self, header_hash: B256, invalid_ancestor: Arc
, diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 4dac9758372e..f58f620b44ac 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -48,7 +48,7 @@ impl OnForkChoiceUpdated { /// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update succeeded and no /// payload attributes were provided. - pub(crate) fn valid(status: PayloadStatus) -> Self { + pub fn valid(status: PayloadStatus) -> Self { Self { forkchoice_status: ForkchoiceStatus::from_payload_status(&status.status), fut: Either::Left(futures::future::ready(Ok(ForkchoiceUpdated::new(status)))), @@ -57,7 +57,7 @@ impl OnForkChoiceUpdated { /// Creates a new instance of `OnForkChoiceUpdated` with the given payload status, if the /// forkchoice update failed due to an invalid payload. - pub(crate) fn with_invalid(status: PayloadStatus) -> Self { + pub fn with_invalid(status: PayloadStatus) -> Self { Self { forkchoice_status: ForkchoiceStatus::from_payload_status(&status.status), fut: Either::Left(futures::future::ready(Ok(ForkchoiceUpdated::new(status)))), @@ -66,7 +66,7 @@ impl OnForkChoiceUpdated { /// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update failed because the /// given state is considered invalid - pub(crate) fn invalid_state() -> Self { + pub fn invalid_state() -> Self { Self { forkchoice_status: ForkchoiceStatus::Invalid, fut: Either::Left(futures::future::ready(Err(ForkchoiceUpdateError::InvalidState))), @@ -75,7 +75,7 @@ impl OnForkChoiceUpdated { /// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update was successful but /// payload attributes were invalid. 
- pub(crate) fn invalid_payload_attributes() -> Self { + pub fn invalid_payload_attributes() -> Self { Self { // This is valid because this is only reachable if the state and payload is valid forkchoice_status: ForkchoiceStatus::Valid, @@ -86,7 +86,7 @@ impl OnForkChoiceUpdated { } /// If the forkchoice update was successful and no payload attributes were provided, this method - pub(crate) fn updated_with_pending_payload_id( + pub const fn updated_with_pending_payload_id( payload_status: PayloadStatus, pending_payload_id: oneshot::Receiver>, ) -> Self { diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index aeadb3f8af26..2179618481ad 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -53,7 +53,7 @@ pub use error::{ }; mod invalid_headers; -use invalid_headers::InvalidHeaderCache; +pub use invalid_headers::InvalidHeaderCache; mod event; pub use event::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; @@ -62,13 +62,12 @@ mod handle; pub use handle::BeaconConsensusEngineHandle; mod forkchoice; -pub use forkchoice::ForkchoiceStatus; -use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}; +pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; mod metrics; use metrics::EngineMetrics; -pub(crate) mod sync; +pub mod sync; use sync::{EngineSyncController, EngineSyncEvent}; /// Hooks for running during the main loop of @@ -89,6 +88,18 @@ const MAX_INVALID_HEADERS: u32 = 512u32; /// If the distance exceeds this threshold, the pipeline will be used for sync. pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; +/// Represents a pending forkchoice update. +/// +/// This type encapsulates the necessary components for a pending forkchoice update +/// in the context of a beacon consensus engine. +/// +/// It consists of: +/// - The current fork choice state. +/// - Optional payload attributes specific to the engine type. 
+/// - Sender for the result of an oneshot channel, conveying the outcome of the fork choice update. +type PendingForkchoiceUpdate = + (ForkchoiceState, Option, oneshot::Sender>); + /// The beacon consensus engine is the driver that switches between historical and live sync. /// /// The beacon consensus engine is itself driven by messages from the Consensus Layer, which are @@ -190,12 +201,7 @@ where /// It is recorded if we cannot process the forkchoice update because /// a hook with database read-write access is active. /// This is a temporary solution to always process missed FCUs. - #[allow(clippy::type_complexity)] - pending_forkchoice_update: Option<( - ForkchoiceState, - Option, - oneshot::Sender>, - )>, + pending_forkchoice_update: Option>, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. invalid_headers: InvalidHeaderCache, @@ -240,7 +246,6 @@ where task_spawner: Box, sync_state_updater: Box, max_block: Option, - run_pipeline_continuously: bool, payload_builder: PayloadBuilderHandle, target: Option, pipeline_run_threshold: u64, @@ -254,7 +259,6 @@ where task_spawner, sync_state_updater, max_block, - run_pipeline_continuously, payload_builder, target, pipeline_run_threshold, @@ -285,7 +289,6 @@ where task_spawner: Box, sync_state_updater: Box, max_block: Option, - run_pipeline_continuously: bool, payload_builder: PayloadBuilderHandle, target: Option, pipeline_run_threshold: u64, @@ -299,7 +302,6 @@ where pipeline, client, task_spawner.clone(), - run_pipeline_continuously, max_block, blockchain.chain_spec(), event_sender.clone(), @@ -1448,11 +1450,6 @@ where return Ok(()); } - // update the canon chain if continuous is enabled - if self.sync.run_pipeline_continuously() { - self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; - } - let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { Some(current_state) => current_state, None => { @@ -1984,7 +1981,7 @@ mod tests 
{ BeaconForkChoiceUpdateError, }; use assert_matches::assert_matches; - use reth_primitives::{ChainSpecBuilder, MAINNET}; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; @@ -2169,9 +2166,8 @@ mod tests { provider .insert_block( b.clone().try_seal_with_senders().expect("invalid tx signature in block"), - None, ) - .map(|_| ()) + .map(drop) }) .expect("failed to insert"); provider.commit().unwrap(); @@ -2499,8 +2495,9 @@ mod tests { mod new_payload { use super::*; + use alloy_genesis::Genesis; use reth_db::test_utils::create_test_static_files_dir; - use reth_primitives::{genesis::Genesis, Hardfork, U256}; + use reth_primitives::{EthereumHardfork, U256}; use reth_provider::{ providers::StaticFileProvider, test_utils::blocks::BlockchainTestData, }; @@ -2729,9 +2726,9 @@ mod tests { async fn payload_pre_merge() { let data = BlockchainTestData::default(); let mut block1 = data.blocks[0].0.block.clone(); - block1 - .header - .set_difficulty(MAINNET.fork(Hardfork::Paris).ttd().unwrap() - U256::from(1)); + block1.header.set_difficulty( + MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), + ); block1 = block1.unseal().seal_slow(); let (block2, exec_result2) = data.blocks[1].clone(); let mut block2 = block2.unseal().block; diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index e4e25e413f4d..73fac64cf27e 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -5,13 +5,14 @@ use crate::{ ConsensusEngineLiveSyncProgress, EthBeaconConsensus, }; use futures::FutureExt; +use reth_chainspec::ChainSpec; use reth_db_api::database::Database; use reth_network_p2p::{ bodies::client::BodiesClient, full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, 
FullBlockClient}, headers::client::HeadersClient, }; -use reth_primitives::{BlockNumber, ChainSpec, SealedBlock, B256}; +use reth_primitives::{BlockNumber, SealedBlock, B256}; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventSender; @@ -54,8 +55,6 @@ where /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for /// ordering. This means the blocks will be popped from the heap with ascending block numbers. range_buffered_blocks: BinaryHeap>, - /// If enabled, the pipeline will be triggered continuously, as soon as it becomes idle - run_pipeline_continuously: bool, /// Max block after which the consensus engine would terminate the sync. Used for debugging /// purposes. max_block: Option, @@ -73,7 +72,6 @@ where pipeline: Pipeline, client: Client, pipeline_task_spawner: Box, - run_pipeline_continuously: bool, max_block: Option, chain_spec: Arc, event_sender: EventSender, @@ -89,7 +87,6 @@ where inflight_full_block_requests: Vec::new(), inflight_block_range_requests: Vec::new(), range_buffered_blocks: BinaryHeap::new(), - run_pipeline_continuously, event_sender, max_block, metrics: EngineSyncMetrics::default(), @@ -122,11 +119,6 @@ where self.update_block_download_metrics(); } - /// Returns whether or not the sync controller is set to run the pipeline continuously. - pub(crate) const fn run_pipeline_continuously(&self) -> bool { - self.run_pipeline_continuously - } - /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`. 
#[allow(dead_code)] pub(crate) const fn is_pipeline_sync_pending(&self) -> bool { @@ -271,20 +263,14 @@ where fn try_spawn_pipeline(&mut self) -> Option { match &mut self.pipeline_state { PipelineState::Idle(pipeline) => { - let target = self.pending_pipeline_target.take(); - - if target.is_none() && !self.run_pipeline_continuously { - // nothing to sync - return None; - } - + let target = self.pending_pipeline_target.take()?; let (tx, rx) = oneshot::channel(); let pipeline = pipeline.take().expect("exists"); self.pipeline_task_spawner.spawn_critical_blocking( "pipeline task", Box::pin(async move { - let result = pipeline.run_as_fut(target).await; + let result = pipeline.run_as_fut(Some(target)).await; let _ = tx.send(result); }), ); @@ -294,7 +280,7 @@ where // outdated (included in the range the pipeline is syncing anyway) self.clear_block_download_requests(); - Some(EngineSyncEvent::PipelineStarted(target)) + Some(EngineSyncEvent::PipelineStarted(Some(target))) } PipelineState::Running(_) => None, } @@ -428,12 +414,10 @@ mod tests { use super::*; use assert_matches::assert_matches; use futures::poll; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient}; - use reth_primitives::{ - constants::ETHEREUM_BLOCK_GAS_LIMIT, BlockBody, ChainSpecBuilder, Header, SealedHeader, - MAINNET, - }; + use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, BlockBody, Header, SealedHeader}; use reth_provider::{ test_utils::create_test_provider_factory_with_chain_spec, ExecutionOutcome, }; @@ -453,7 +437,7 @@ mod tests { impl TestPipelineBuilder { /// Create a new [`TestPipelineBuilder`]. 
- fn new() -> Self { + const fn new() -> Self { Self { pipeline_exec_outputs: VecDeque::new(), executor_results: Vec::new(), @@ -550,8 +534,6 @@ mod tests { pipeline, client, Box::::default(), - // run_pipeline_continuously: false here until we want to test this - false, self.max_block, chain_spec, Default::default(), diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 5286b93efd94..b3002d38e949 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -6,6 +6,7 @@ use crate::{ use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; +use reth_chainspec::ChainSpec; use reth_config::config::StageConfig; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; @@ -22,10 +23,10 @@ use reth_network_p2p::{ test_utils::NoopFullBlockClient, }; use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::{BlockNumber, ChainSpec, B256}; +use reth_primitives::{BlockNumber, B256}; use reth_provider::{ providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, - ExecutionOutcome, HeaderSyncMode, + ExecutionOutcome, }; use reth_prune::Pruner; use reth_prune_types::PruneModes; @@ -57,7 +58,7 @@ pub struct TestEnv { } impl TestEnv { - fn new( + const fn new( db: DB, tip_rx: watch::Receiver, engine_handle: BeaconConsensusEngineHandle, @@ -371,7 +372,7 @@ where Pipeline::builder().add_stages(DefaultStages::new( provider_factory.clone(), - HeaderSyncMode::Tip(tip_rx.clone()), + tip_rx.clone(), Arc::clone(&consensus), header_downloader, body_downloader, @@ -392,7 +393,8 @@ where let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); let config = BlockchainTreeConfig::new(1, 2, 3, 2); let tree = Arc::new(ShareableBlockchainTree::new( - 
BlockchainTree::new(externals, config, None).expect("failed to create tree"), + BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"), )); let latest = self.base_config.chain_spec.genesis_header().seal_slow(); let blockchain_provider = @@ -418,7 +420,6 @@ where Box::::default(), Box::::default(), None, - false, payload_builder, None, self.base_config.pipeline_run_threshold.unwrap_or(MIN_BLOCKS_FOR_PIPELINE_RUN), diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index fa2cac1acdf7..dca553deb4c9 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-consensus.workspace = true diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index 318cbc0286e1..e4b2abc13ba6 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -1,4 +1,6 @@ -use reth_primitives::{constants::ETH_TO_WEI, BlockNumber, Chain, ChainSpec, Hardfork, U256}; +use reth_chainspec::{ChainSpec, EthereumHardfork}; +use reth_primitives::{constants::ETH_TO_WEI, BlockNumber, U256}; + /// Calculates the base block reward. 
/// /// The base block reward is defined as: @@ -24,16 +26,23 @@ pub fn base_block_reward( block_difficulty: U256, total_difficulty: U256, ) -> Option { - if chain_spec.chain == Chain::goerli() || - chain_spec.fork(Hardfork::Paris).active_at_ttd(total_difficulty, block_difficulty) - { + if chain_spec.fork(EthereumHardfork::Paris).active_at_ttd(total_difficulty, block_difficulty) { None - } else if chain_spec.fork(Hardfork::Constantinople).active_at_block(block_number) { - Some(ETH_TO_WEI * 2) - } else if chain_spec.fork(Hardfork::Byzantium).active_at_block(block_number) { - Some(ETH_TO_WEI * 3) } else { - Some(ETH_TO_WEI * 5) + Some(base_block_reward_pre_merge(chain_spec, block_number)) + } +} + +/// Calculates the base block reward __before__ the merge (Paris hardfork). +/// +/// Caution: The caller must ensure that the block number is before the merge. +pub fn base_block_reward_pre_merge(chain_spec: &ChainSpec, block_number: BlockNumber) -> u128 { + if chain_spec.fork(EthereumHardfork::Constantinople).active_at_block(block_number) { + ETH_TO_WEI * 2 + } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_block(block_number) { + ETH_TO_WEI * 3 + } else { + ETH_TO_WEI * 5 } } @@ -45,9 +54,10 @@ pub fn base_block_reward( /// # Examples /// /// ``` +/// # use reth_chainspec::MAINNET; /// # use reth_consensus_common::calc::{base_block_reward, block_reward}; /// # use reth_primitives::constants::ETH_TO_WEI; -/// # use reth_primitives::{MAINNET, U256}; +/// # use reth_primitives::U256; /// # /// // This is block 126 on mainnet. 
/// let block_number = 126; @@ -102,7 +112,7 @@ pub const fn ommer_reward( #[cfg(test)] mod tests { use super::*; - use reth_primitives::MAINNET; + use reth_chainspec::MAINNET; #[test] fn calc_base_block_reward() { diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 5e4caf1a84bb..647e95e653ac 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,64 +1,39 @@ //! Collection of methods for block validation. +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ constants::{ eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, MAXIMUM_EXTRA_DATA_SIZE, }, - ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader, + eip4844::calculate_excess_blob_gas, + EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, }; -/// Validate header standalone -pub fn validate_header_standalone( - header: &SealedHeader, - chain_spec: &ChainSpec, -) -> Result<(), ConsensusError> { - // Gas used needs to be less than gas limit. Gas used is going to be checked after execution. +/// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. +#[inline] +pub fn validate_header_gas(header: &SealedHeader) -> Result<(), ConsensusError> { if header.gas_used > header.gas_limit { return Err(ConsensusError::HeaderGasUsedExceedsGasLimit { gas_used: header.gas_used, gas_limit: header.gas_limit, }); } + Ok(()) +} - // Check if base fee is set. - if chain_spec.fork(Hardfork::London).active_at_block(header.number) && +/// Ensure the EIP-1559 base fee is set if the London hardfork is active. 
+#[inline] +pub fn validate_header_base_fee( + header: &SealedHeader, + chain_spec: &ChainSpec, +) -> Result<(), ConsensusError> { + if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number) && header.base_fee_per_gas.is_none() { return Err(ConsensusError::BaseFeeMissing); } - - let wd_root_missing = header.withdrawals_root.is_none() && !chain_spec.is_optimism(); - - // EIP-4895: Beacon chain push withdrawals as operations - if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && wd_root_missing { - return Err(ConsensusError::WithdrawalsRootMissing); - } else if !chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && - header.withdrawals_root.is_some() - { - return Err(ConsensusError::WithdrawalsRootUnexpected); - } - - // Ensures that EIP-4844 fields are valid once cancun is active. - if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_4844_header_standalone(header)?; - } else if header.blob_gas_used.is_some() { - return Err(ConsensusError::BlobGasUsedUnexpected); - } else if header.excess_blob_gas.is_some() { - return Err(ConsensusError::ExcessBlobGasUnexpected); - } else if header.parent_beacon_block_root.is_some() { - return Err(ConsensusError::ParentBeaconBlockRootUnexpected); - } - - if chain_spec.is_prague_active_at_timestamp(header.timestamp) { - if header.requests_root.is_none() { - return Err(ConsensusError::RequestsRootMissing); - } - } else if header.requests_root.is_some() { - return Err(ConsensusError::RequestsRootUnexpected); - } - Ok(()) } @@ -175,23 +150,133 @@ pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), Cons /// /// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. /// This must be 32 bytes or fewer; formally Hx. 
+#[inline] pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { - Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() }) + let extradata_len = header.extra_data.len(); + if extradata_len > MAXIMUM_EXTRA_DATA_SIZE { + Err(ConsensusError::ExtraDataExceedsMax { len: extradata_len }) } else { Ok(()) } } +/// Validates against the parent hash and number. +/// +/// This function ensures that the header block number is sequential and that the hash of the parent +/// header matches the parent hash in the header. +#[inline] +pub fn validate_against_parent_hash_number( + header: &SealedHeader, + parent: &SealedHeader, +) -> Result<(), ConsensusError> { + // Parent number is consistent. + if parent.number + 1 != header.number { + return Err(ConsensusError::ParentBlockNumberMismatch { + parent_block_number: parent.number, + block_number: header.number, + }) + } + + if parent.hash() != header.parent_hash { + return Err(ConsensusError::ParentHashMismatch( + GotExpected { got: header.parent_hash, expected: parent.hash() }.into(), + )) + } + + Ok(()) +} + +/// Validates the base fee against the parent and EIP-1559 rules. +#[inline] +pub fn validate_against_parent_eip1559_base_fee( + header: &SealedHeader, + parent: &SealedHeader, + chain_spec: &ChainSpec, +) -> Result<(), ConsensusError> { + if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number) { + let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?; + + let expected_base_fee = + if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { + reth_primitives::constants::EIP1559_INITIAL_BASE_FEE + } else { + // This BaseFeeMissing will not happen as previous blocks are checked to have + // them. + parent + .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(header.timestamp)) + .ok_or(ConsensusError::BaseFeeMissing)? 
+ }; + if expected_base_fee != base_fee { + return Err(ConsensusError::BaseFeeDiff(GotExpected { + expected: expected_base_fee, + got: base_fee, + })) + } + } + + Ok(()) +} + +/// Validates the timestamp against the parent to make sure it is in the past. +#[inline] +pub fn validate_against_parent_timestamp( + header: &SealedHeader, + parent: &SealedHeader, +) -> Result<(), ConsensusError> { + if header.is_timestamp_in_past(parent.timestamp) { + return Err(ConsensusError::TimestampIsInPast { + parent_timestamp: parent.timestamp, + timestamp: header.timestamp, + }) + } + Ok(()) +} + +/// Validates that the EIP-4844 header fields are correct with respect to the parent block. This +/// ensures that the `blob_gas_used` and `excess_blob_gas` fields exist in the child header, and +/// that the `excess_blob_gas` field matches the expected `excess_blob_gas` calculated from the +/// parent header fields. +pub fn validate_against_parent_4844( + header: &SealedHeader, + parent: &SealedHeader, +) -> Result<(), ConsensusError> { + // From [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension): + // + // > For the first post-fork block, both parent.blob_gas_used and parent.excess_blob_gas + // > are evaluated as 0. + // + // This means in the first post-fork block, calculate_excess_blob_gas will return 0. 
+ let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0); + let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0); + + if header.blob_gas_used.is_none() { + return Err(ConsensusError::BlobGasUsedMissing) + } + let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; + + let expected_excess_blob_gas = + calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used); + if expected_excess_blob_gas != excess_blob_gas { + return Err(ConsensusError::ExcessBlobGasDiff { + diff: GotExpected { got: excess_blob_gas, expected: expected_excess_blob_gas }, + parent_excess_blob_gas, + parent_blob_gas_used, + }) + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; use mockall::mock; use rand::Rng; + use reth_chainspec::ChainSpecBuilder; use reth_primitives::{ hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, - BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844, - Withdrawal, Withdrawals, U256, + BlockNumber, Bytes, Signature, Transaction, TransactionSigned, TxEip4844, Withdrawal, + Withdrawals, U256, }; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, @@ -410,22 +495,6 @@ mod tests { .return_const(Ok(Some(Withdrawal { index: 2, ..Default::default() }))); } - #[test] - fn shanghai_block_zero_withdrawals() { - // ensures that if shanghai is activated, and we include a block with a withdrawals root, - // that the header is valid - let chain_spec = ChainSpecBuilder::mainnet().shanghai_activated().build(); - - let header = Header { - base_fee_per_gas: Some(1337u64), - withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), - ..Default::default() - } - .seal_slow(); - - assert_eq!(validate_header_standalone(&header, &chain_spec), Ok(())); - } - #[test] fn cancun_block_incorrect_blob_gas_used() { let chain_spec = ChainSpecBuilder::mainnet().cancun_activated().build(); diff --git 
a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 8ea4236bfa12..1d4d6d758c42 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -16,7 +16,9 @@ reth-primitives.workspace = true # misc auto_impl.workspace = true -thiserror.workspace = true +thiserror-no-std = {workspace = true, default-features = false } [features] +default = ["std"] +std = ["thiserror-no-std/std"] test-utils = [] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index fd3c694c2fa9..7aee9f15e706 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -7,14 +7,23 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] use reth_primitives::{ - BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected, GotExpectedBoxed, Header, - HeaderValidationError, InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader, + constants::MINIMUM_GAS_LIMIT, BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected, + GotExpectedBoxed, Header, InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader, B256, U256, }; + +#[cfg(feature = "std")] use std::fmt::Debug; +#[cfg(not(feature = "std"))] +extern crate alloc; + +#[cfg(not(feature = "std"))] +use alloc::{fmt::Debug, vec::Vec}; + /// A consensus implementation that does nothing. pub mod noop; @@ -119,7 +128,7 @@ pub trait Consensus: Debug + Send + Sync { } /// Consensus Errors -#[derive(thiserror::Error, Debug, PartialEq, Eq, Clone)] +#[derive(thiserror_no_std::Error, Debug, PartialEq, Eq, Clone)] pub enum ConsensusError { /// Error when the gas used in the header exceeds the gas limit. 
#[error("block used gas ({gas_used}) is greater than gas limit ({gas_limit})")] @@ -197,6 +206,10 @@ pub enum ConsensusError { block_number: BlockNumber, }, + /// Error when the parent hash does not match the expected parent hash. + #[error("mismatched parent hash: {0}")] + ParentHashMismatch(GotExpectedBoxed), + /// Error when the block timestamp is in the future compared to our clock time. #[error("block timestamp {timestamp} is in the future compared to our clock time {present_timestamp}")] TimestampIsInFuture { @@ -320,9 +333,60 @@ pub enum ConsensusError { #[error(transparent)] InvalidTransaction(#[from] InvalidTransactionError), - /// Error type transparently wrapping `HeaderValidationError`. - #[error(transparent)] - HeaderValidationError(#[from] HeaderValidationError), + /// Error when the block's base fee is different from the expected base fee. + #[error("block base fee mismatch: {0}")] + BaseFeeDiff(GotExpected), + + /// Error when there is an invalid excess blob gas. + #[error( + "invalid excess blob gas: {diff}; \ + parent excess blob gas: {parent_excess_blob_gas}, \ + parent blob gas used: {parent_blob_gas_used}" + )] + ExcessBlobGasDiff { + /// The excess blob gas diff. + diff: GotExpected, + /// The parent excess blob gas. + parent_excess_blob_gas: u64, + /// The parent blob gas used. + parent_blob_gas_used: u64, + }, + + /// Error when the child gas limit exceeds the maximum allowed increase. + #[error("child gas_limit {child_gas_limit} max increase is {parent_gas_limit}/1024")] + GasLimitInvalidIncrease { + /// The parent gas limit. + parent_gas_limit: u64, + /// The child gas limit. + child_gas_limit: u64, + }, + + /// Error indicating that the child gas limit is below the minimum allowed limit. + /// + /// This error occurs when the child gas limit is less than the specified minimum gas limit. 
+ #[error("child gas limit {child_gas_limit} is below the minimum allowed limit ({MINIMUM_GAS_LIMIT})")] + GasLimitInvalidMinimum { + /// The child gas limit. + child_gas_limit: u64, + }, + + /// Error when the child gas limit exceeds the maximum allowed decrease. + #[error("child gas_limit {child_gas_limit} max decrease is {parent_gas_limit}/1024")] + GasLimitInvalidDecrease { + /// The parent gas limit. + parent_gas_limit: u64, + /// The child gas limit. + child_gas_limit: u64, + }, + + /// Error when the block timestamp is in the past compared to the parent timestamp. + #[error("block timestamp {timestamp} is in the past compared to the parent timestamp {parent_timestamp}")] + TimestampIsInPast { + /// The parent block's timestamp. + parent_timestamp: u64, + /// The block's timestamp. + timestamp: u64, + }, } impl ConsensusError { @@ -333,6 +397,6 @@ impl ConsensusError { } /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. -#[derive(thiserror::Error, Debug)] +#[derive(thiserror_no_std::Error, Debug)] #[error("Consensus error: {0}, Invalid header: {1:?}")] pub struct HeaderConsensusError(ConsensusError, SealedHeader); diff --git a/crates/consensus/debug-client/src/client.rs b/crates/consensus/debug-client/src/client.rs index a63b7a85d704..e72f58855eeb 100644 --- a/crates/consensus/debug-client/src/client.rs +++ b/crates/consensus/debug-client/src/client.rs @@ -92,17 +92,19 @@ impl DebugConsensusClient

{ let block_hash = payload.block_hash(); let block_number = payload.block_number(); + previous_block_hashes.push(block_hash); + // Send new events to execution client - reth_rpc_api::EngineApiClient::::new_payload_v3( + let _ = reth_rpc_api::EngineApiClient::::new_payload_v3( &execution_client, payload.execution_payload_v3, payload.versioned_hashes, payload.parent_beacon_block_root, ) .await - .unwrap(); - - previous_block_hashes.push(block_hash); + .inspect_err(|err| { + warn!(target: "consensus::debug-client", %err, %block_hash, %block_number, "failed to submit new payload to execution client"); + }); // Load previous block hashes. We're using (head - 32) and (head - 64) as the safe and // finalized block hashes. diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 77a5676ab284..f472da06bc12 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -10,16 +10,18 @@ repository.workspace = true [dependencies] reth.workspace = true +reth-chainspec.workspace = true reth-primitives.workspace = true reth-tracing.workspace = true -reth-db.workspace = true +reth-db = { workspace = true, features = ["test-utils"] } reth-rpc.workspace = true reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-provider.workspace = true -reth-node-builder.workspace = true +reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true +reth-network-peers.workspace = true jsonrpsee.workspace = true @@ -29,7 +31,7 @@ tokio.workspace = true tokio-stream.workspace = true serde_json.workspace = true alloy-signer.workspace = true -alloy-signer-wallet = { workspace = true, features = ["mnemonic"] } +alloy-signer-local = { workspace = true, features = ["mnemonic"] } alloy-rpc-types.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } diff --git 
a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index aa7d46428bef..e55a9a24ba2e 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -4,11 +4,11 @@ use reth::{ builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; +use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ components::NodeComponentsBuilder, FullNodeTypesAdapter, Node, NodeAdapter, RethFullAdapter, }; -use reth_primitives::ChainSpec; use reth_provider::providers::BlockchainProvider; use std::sync::Arc; use tracing::{span, Level}; @@ -58,15 +58,12 @@ where let mut nodes: Vec> = Vec::with_capacity(num_nodes); for idx in 0..num_nodes { - let mut node_config = NodeConfig::test() + let node_config = NodeConfig::test() .with_chain(chain_spec.clone()) .with_network(network_config.clone()) .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); - - if is_dev { - node_config = node_config.dev(); - } + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()) + .set_dev(is_dev); let span = span!(Level::INFO, "node", idx); let _enter = span.enter(); diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 5b148b09f55c..e5791afd76f8 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -1,6 +1,9 @@ use futures_util::StreamExt; -use reth::network::{NetworkEvent, NetworkEvents, NetworkHandle, PeersInfo}; -use reth_primitives::NodeRecord; +use reth::{ + network::{NetworkEvent, NetworkEvents, NetworkHandle, PeersInfo}, + rpc::types::PeerId, +}; +use reth_network_peers::NodeRecord; use reth_tokio_util::EventStream; use reth_tracing::tracing::info; @@ -23,7 +26,7 @@ impl NetworkTestContext { match self.network_events.next().await { Some(NetworkEvent::PeerAdded(_)) => (), - _ => panic!("Expected a peer added event"), + ev => panic!("Expected a peer added event, got: 
{ev:?}"), } } @@ -32,13 +35,17 @@ impl NetworkTestContext { self.network.local_node_record() } - /// Expects a session to be established - pub async fn expect_session(&mut self) { - match self.network_events.next().await { - Some(NetworkEvent::SessionEstablished { remote_addr, .. }) => { - info!(?remote_addr, "Session established") + /// Awaits the next event for an established session. + pub async fn next_session_established(&mut self) -> Option { + while let Some(ev) = self.network_events.next().await { + match ev { + NetworkEvent::SessionEstablished { peer_id, .. } => { + info!("Session established with peer: {:?}", peer_id); + return Some(peer_id) + } + _ => continue, } - _ => panic!("Expected session established event"), } + None } } diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 7530e5818cae..5e3ed2a8c75e 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -9,6 +9,7 @@ use futures_util::Future; use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, builder::FullNode, + payload::PayloadTypes, providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, rpc::types::engine::PayloadStatusEnum, }; @@ -51,11 +52,11 @@ where }) } + /// Establish a connection to the node pub async fn connect(&mut self, node: &mut NodeTestContext) { self.network.add_peer(node.network.record()).await; - node.network.add_peer(self.network.record()).await; - node.network.expect_session().await; - self.network.expect_session().await; + node.network.next_session_established().await; + self.network.next_session_established().await; } /// Advances the chain `length` blocks. 
@@ -65,17 +66,17 @@ where &mut self, length: u64, tx_generator: impl Fn(u64) -> Pin>>, - attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes + attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes + Copy, ) -> eyre::Result< Vec<( - ::BuiltPayload, - ::PayloadBuilderAttributes, + ::BuiltPayload, + ::PayloadBuilderAttributes, )>, > where ::ExecutionPayloadV3: - From<::BuiltPayload> + PayloadEnvelopeExt, + From<::BuiltPayload> + PayloadEnvelopeExt, { let mut chain = Vec::with_capacity(length as usize); for i in 0..length { @@ -96,14 +97,14 @@ where /// It triggers the resolve payload via engine api and expects the built payload event. pub async fn new_payload( &mut self, - attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, + attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, ) -> eyre::Result<( - <::Engine as EngineTypes>::BuiltPayload, - <::Engine as EngineTypes>::PayloadBuilderAttributes, + <::Engine as PayloadTypes>::BuiltPayload, + <::Engine as PayloadTypes>::PayloadBuilderAttributes, )> where ::ExecutionPayloadV3: - From<::BuiltPayload> + PayloadEnvelopeExt, + From<::BuiltPayload> + PayloadEnvelopeExt, { // trigger new payload building draining the pool let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); @@ -121,14 +122,14 @@ where pub async fn advance_block( &mut self, versioned_hashes: Vec, - attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, + attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, ) -> eyre::Result<( - ::BuiltPayload, - <::Engine as EngineTypes>::PayloadBuilderAttributes, + ::BuiltPayload, + <::Engine as PayloadTypes>::PayloadBuilderAttributes, )> where ::ExecutionPayloadV3: - From<::BuiltPayload> + PayloadEnvelopeExt, + From<::BuiltPayload> + PayloadEnvelopeExt, { let (payload, eth_attr) = self.new_payload(attributes_generator).await?; diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 
09f161a91dc7..b05d5df895a0 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,8 +1,13 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; -use reth::{api::FullNodeComponents, builder::rpc::RpcRegistry, rpc::api::DebugApiServer}; +use reth::{ + builder::{rpc::RpcRegistry, FullNodeComponents}, + rpc::{ + api::{eth::helpers::EthTransactions, DebugApiServer}, + server_types::eth::EthResult, + }, +}; use reth_primitives::{Bytes, B256}; -use reth_rpc::eth::{error::EthResult, EthTransactions}; pub struct RpcTestContext { pub inner: RpcRegistry, diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 8fe7efd0e77b..0719c7733e6a 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -1,31 +1,35 @@ use alloy_consensus::{ - BlobTransactionSidecar, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope, + BlobTransactionSidecar, EnvKzgSettings, SidecarBuilder, SimpleCoder, TxEip4844Variant, + TxEnvelope, }; -use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; +use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; use alloy_rpc_types::{TransactionInput, TransactionRequest}; -use alloy_signer_wallet::LocalWallet; +use alloy_signer_local::PrivateKeySigner; use eyre::Ok; use reth_primitives::{hex, Address, Bytes, U256}; -use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, B256}; +use reth_primitives::B256; pub struct TransactionTestContext; impl TransactionTestContext { /// Creates a static transfer and signs it, returning bytes - pub async fn transfer_tx(chain_id: u64, wallet: LocalWallet) -> TxEnvelope { + pub async fn transfer_tx(chain_id: u64, wallet: PrivateKeySigner) -> TxEnvelope { let tx = tx(chain_id, None, 0); Self::sign_tx(wallet, tx).await } /// Creates a static transfer and signs it, returning bytes - pub async fn 
transfer_tx_bytes(chain_id: u64, wallet: LocalWallet) -> Bytes { + pub async fn transfer_tx_bytes(chain_id: u64, wallet: PrivateKeySigner) -> Bytes { let signed = Self::transfer_tx(chain_id, wallet).await; signed.encoded_2718().into() } /// Creates a tx with blob sidecar and sign it - pub async fn tx_with_blobs(chain_id: u64, wallet: LocalWallet) -> eyre::Result { + pub async fn tx_with_blobs( + chain_id: u64, + wallet: PrivateKeySigner, + ) -> eyre::Result { let mut tx = tx(chain_id, None, 0); let mut builder = SidecarBuilder::::new(); @@ -40,13 +44,16 @@ impl TransactionTestContext { } /// Signs an arbitrary TransactionRequest using the provided wallet - pub async fn sign_tx(wallet: LocalWallet, tx: TransactionRequest) -> TxEnvelope { - let signer = EthereumSigner::from(wallet); + pub async fn sign_tx(wallet: PrivateKeySigner, tx: TransactionRequest) -> TxEnvelope { + let signer = EthereumWallet::from(wallet); tx.build(&signer).await.unwrap() } /// Creates a tx with blob sidecar and sign it, returning bytes - pub async fn tx_with_blobs_bytes(chain_id: u64, wallet: LocalWallet) -> eyre::Result { + pub async fn tx_with_blobs_bytes( + chain_id: u64, + wallet: PrivateKeySigner, + ) -> eyre::Result { let signed = Self::tx_with_blobs(chain_id, wallet).await?; Ok(signed.encoded_2718().into()) @@ -54,23 +61,23 @@ impl TransactionTestContext { pub async fn optimism_l1_block_info_tx( chain_id: u64, - wallet: LocalWallet, + wallet: PrivateKeySigner, nonce: u64, ) -> Bytes { let l1_block_info = 
Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); let tx = tx(chain_id, Some(l1_block_info), nonce); - let signer = EthereumSigner::from(wallet); + let signer = EthereumWallet::from(wallet); tx.build(&signer).await.unwrap().encoded_2718().into() } /// Validates the sidecar of a given tx envelope and returns the versioned hashes pub fn validate_sidecar(tx: TxEnvelope) -> Vec { - let proof_setting = MAINNET_KZG_TRUSTED_SETUP.clone(); + let proof_setting = EnvKzgSettings::Default; match tx { TxEnvelope::Eip4844(signed) => match signed.tx() { TxEip4844Variant::TxEip4844WithSidecar(tx) => { - tx.validate_blob(&proof_setting).unwrap(); + tx.validate_blob(proof_setting.get()).unwrap(); tx.sidecar.versioned_hashes().collect() } _ => panic!("Expected Eip4844 transaction with sidecar"), diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index e841e7cd786c..d24ee2d3f0e4 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -1,9 +1,9 @@ use alloy_signer::Signer; -use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; +use alloy_signer_local::{coins_bip39::English, MnemonicBuilder, PrivateKeySigner}; /// One of the accounts of the genesis allocations. 
pub struct Wallet { - pub inner: LocalWallet, + pub inner: PrivateKeySigner, pub inner_nonce: u64, pub chain_id: u64, amount: usize, @@ -27,7 +27,7 @@ impl Wallet { self.derivation_path.as_deref().unwrap_or("m/44'/60'/0'/0/") } - pub fn gen(&self) -> Vec { + pub fn gen(&self) -> Vec { let builder = MnemonicBuilder::::default().phrase(TEST_MNEMONIC); // use the derivation path @@ -36,7 +36,7 @@ impl Wallet { let mut wallets = Vec::with_capacity(self.amount); for idx in 0..self.amount { let builder = - builder.clone().derivation_path(&format!("{derivation_path}{idx}")).unwrap(); + builder.clone().derivation_path(format!("{derivation_path}{idx}")).unwrap(); let wallet = builder.build().unwrap().with_chain_id(Some(self.chain_id)); wallets.push(wallet) } diff --git a/crates/engine-primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml similarity index 90% rename from crates/engine-primitives/Cargo.toml rename to crates/engine/primitives/Cargo.toml index 46da7286d51a..b44a4a8aa4e7 100644 --- a/crates/engine-primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-chainspec.workspace = true reth-payload-primitives.workspace = true # misc diff --git a/crates/engine-primitives/src/error.rs b/crates/engine/primitives/src/error.rs similarity index 100% rename from crates/engine-primitives/src/error.rs rename to crates/engine/primitives/src/error.rs diff --git a/crates/engine-primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs similarity index 62% rename from crates/engine-primitives/src/lib.rs rename to crates/engine/primitives/src/lib.rs index c10156fb786a..b83abc39e6cc 100644 --- a/crates/engine-primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -8,36 +8,26 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use core::fmt; -use reth_primitives::ChainSpec; - -use 
reth_payload_primitives::{ - BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadAttributes, - PayloadBuilderAttributes, PayloadOrAttributes, +use reth_chainspec::ChainSpec; +pub use reth_payload_primitives::{ + BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, + PayloadTypes, }; - use serde::{de::DeserializeOwned, ser::Serialize}; -/// The types that are used by the engine API. + +/// This type defines the versioned types of the engine API. +/// +/// This includes the execution payload types and payload attributes that are used to trigger a +/// payload job. Hence this trait is also [`PayloadTypes`]. pub trait EngineTypes: - DeserializeOwned + Serialize + fmt::Debug + Unpin + Send + Sync + Clone + PayloadTypes< + BuiltPayload: TryInto + + TryInto + + TryInto + + TryInto, + > + DeserializeOwned + + Serialize { - /// The RPC payload attributes type the CL node emits via the engine API. - type PayloadAttributes: PayloadAttributes + Unpin; - - /// The payload attributes type that contains information about a running payload job. - type PayloadBuilderAttributes: PayloadBuilderAttributes - + Clone - + Unpin; - - /// The built payload type. - type BuiltPayload: BuiltPayload - + Clone - + Unpin - + TryInto - + TryInto - + TryInto - + TryInto; - /// Execution Payload V1 type. type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; /// Execution Payload V2 type. 
diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml new file mode 100644 index 000000000000..e2a1c462d6c8 --- /dev/null +++ b/crates/engine/tree/Cargo.toml @@ -0,0 +1,78 @@ +[package] +name = "reth-engine-tree" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-beacon-consensus.workspace = true +reth-blockchain-tree.workspace = true +reth-blockchain-tree-api.workspace = true +reth-chainspec.workspace = true +reth-consensus.workspace = true +reth-db.workspace = true +reth-db-api.workspace = true +reth-engine-primitives.workspace = true +reth-errors.workspace = true +reth-ethereum-consensus.workspace = true +reth-evm.workspace = true +reth-network-p2p.workspace = true +reth-payload-builder.workspace = true +reth-payload-primitives.workspace = true +reth-payload-validator.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-prune.workspace = true +reth-prune-types.workspace = true +reth-revm.workspace = true +reth-rpc-types.workspace = true +reth-stages-api.workspace = true +reth-static-file.workspace = true +reth-tasks.workspace = true +reth-tokio-util.workspace = true +reth-trie.workspace = true +revm.workspace = true + +# common +futures.workspace = true +tokio = { workspace = true, features = ["macros", "sync"] } +tokio-stream = { workspace = true, features = ["sync"] } + + +# metrics +metrics.workspace = true +reth-metrics = { workspace = true, features = ["common"] } + +# misc +aquamarine.workspace = true +parking_lot.workspace = true +tracing.workspace = true + +# optional deps for test-utils +reth-stages = { workspace = true, optional = true } +reth-tracing = { workspace = true, optional = true } + +[dev-dependencies] +# reth +reth-db = { workspace = true, features = ["test-utils"] } +reth-network-p2p = { workspace = true, 
features = ["test-utils"] } +reth-prune-types.workspace = true +reth-stages = { workspace = true, features = ["test-utils"] } +reth-tracing.workspace = true + +assert_matches.workspace = true + +[features] +test-utils = [ + "reth-db/test-utils", + "reth-network-p2p/test-utils", + "reth-stages/test-utils", + "reth-tracing" +] diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs new file mode 100644 index 000000000000..24153bed24c0 --- /dev/null +++ b/crates/engine/tree/src/backfill.rs @@ -0,0 +1,290 @@ +//! It is expected that the node has two sync modes: +//! +//! - Backfill sync: Sync to a certain block height in stages, e.g. download data from p2p then +//! execute that range. +//! - Live sync: In this mode the nodes is keeping up with the latest tip and listens for new +//! requests from the consensus client. +//! +//! These modes are mutually exclusive and the node can only be in one mode at a time. + +use futures::FutureExt; +use reth_db_api::database::Database; +use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; +use reth_tasks::TaskSpawner; +use std::task::{ready, Context, Poll}; +use tokio::sync::oneshot; +use tracing::trace; + +/// Backfill sync mode functionality. +pub trait BackfillSync: Send + Sync { + /// Performs a backfill action. + fn on_action(&mut self, action: BackfillAction); + + /// Polls the pipeline for completion. + fn poll(&mut self, cx: &mut Context<'_>) -> Poll; +} + +/// The backfill actions that can be performed. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum BackfillAction { + /// Start backfilling with the given target. + Start(PipelineTarget), +} + +/// The events that can be emitted on backfill sync. +#[derive(Debug)] +pub enum BackfillEvent { + /// Backfill sync idle. + Idle, + /// Backfill sync started. + Started(PipelineTarget), + /// Backfill sync finished. + /// + /// If this is returned, backfill sync is idle. 
+ Finished(Result), + /// Sync task was dropped after it was started, unable to receive it because + /// channel closed. This would indicate a panicked task. + TaskDropped(String), +} + +/// Pipeline sync. +#[derive(Debug)] +pub struct PipelineSync +where + DB: Database, +{ + /// The type that can spawn the pipeline task. + pipeline_task_spawner: Box, + /// The current state of the pipeline. + /// The pipeline is used for large ranges. + pipeline_state: PipelineState, + /// Pending target block for the pipeline to sync + pending_pipeline_target: Option, +} + +impl PipelineSync +where + DB: Database + 'static, +{ + /// Create a new instance. + pub fn new(pipeline: Pipeline, pipeline_task_spawner: Box) -> Self { + Self { + pipeline_task_spawner, + pipeline_state: PipelineState::Idle(Some(pipeline)), + pending_pipeline_target: None, + } + } + + /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`. + #[allow(dead_code)] + const fn is_pipeline_sync_pending(&self) -> bool { + self.pending_pipeline_target.is_some() && self.pipeline_state.is_idle() + } + + /// Returns `true` if the pipeline is idle. + const fn is_pipeline_idle(&self) -> bool { + self.pipeline_state.is_idle() + } + + /// Returns `true` if the pipeline is active. + const fn is_pipeline_active(&self) -> bool { + !self.is_pipeline_idle() + } + + /// Sets a new target to sync the pipeline to. + /// + /// But ensures the target is not the zero hash. + fn set_pipeline_sync_target(&mut self, target: PipelineTarget) { + if target.sync_target().is_some_and(|target| target.is_zero()) { + trace!( + target: "consensus::engine::sync", + "Pipeline target cannot be zero hash." + ); + // precaution to never sync to the zero hash + return + } + self.pending_pipeline_target = Some(target); + } + + /// This will spawn the pipeline if it is idle and a target is set or if the pipeline is set to + /// run continuously. 
+ fn try_spawn_pipeline(&mut self) -> Option { + match &mut self.pipeline_state { + PipelineState::Idle(pipeline) => { + let target = self.pending_pipeline_target.take()?; + let (tx, rx) = oneshot::channel(); + + let pipeline = pipeline.take().expect("exists"); + self.pipeline_task_spawner.spawn_critical_blocking( + "pipeline task", + Box::pin(async move { + let result = pipeline.run_as_fut(Some(target)).await; + let _ = tx.send(result); + }), + ); + self.pipeline_state = PipelineState::Running(rx); + + Some(BackfillEvent::Started(target)) + } + PipelineState::Running(_) => None, + } + } + + /// Advances the pipeline state. + /// + /// This checks for the result in the channel, or returns pending if the pipeline is idle. + fn poll_pipeline(&mut self, cx: &mut Context<'_>) -> Poll { + let res = match self.pipeline_state { + PipelineState::Idle(_) => return Poll::Pending, + PipelineState::Running(ref mut fut) => { + ready!(fut.poll_unpin(cx)) + } + }; + let ev = match res { + Ok((_, result)) => BackfillEvent::Finished(result), + Err(why) => { + // failed to receive the pipeline + BackfillEvent::TaskDropped(why.to_string()) + } + }; + Poll::Ready(ev) + } +} + +impl BackfillSync for PipelineSync +where + DB: Database + 'static, +{ + fn on_action(&mut self, event: BackfillAction) { + match event { + BackfillAction::Start(target) => self.set_pipeline_sync_target(target), + } + } + + fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + // try to spawn a pipeline if a target is set + if let Some(event) = self.try_spawn_pipeline() { + return Poll::Ready(event) + } + + // make sure we poll the pipeline if it's active, and return any ready pipeline events + if !self.is_pipeline_idle() { + // advance the pipeline + if let Poll::Ready(event) = self.poll_pipeline(cx) { + return Poll::Ready(event) + } + } + + Poll::Pending + } +} + +/// The possible pipeline states within the sync controller. +/// +/// [`PipelineState::Idle`] means that the pipeline is currently idle. 
+/// [`PipelineState::Running`] means that the pipeline is currently running. +/// +/// NOTE: The differentiation between these two states is important, because when the pipeline is +/// running, it acquires the write lock over the database. This means that we cannot forward to the +/// blockchain tree any messages that would result in database writes, since it would result in a +/// deadlock. +#[derive(Debug)] +enum PipelineState { + /// Pipeline is idle. + Idle(Option>), + /// Pipeline is running and waiting for a response + Running(oneshot::Receiver>), +} + +impl PipelineState { + /// Returns `true` if the state matches idle. + const fn is_idle(&self) -> bool { + matches!(self, Self::Idle(_)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder}; + use assert_matches::assert_matches; + use futures::poll; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; + use reth_network_p2p::test_utils::TestFullBlockClient; + use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, BlockNumber, Header, B256}; + use reth_stages::ExecOutput; + use reth_stages_api::StageCheckpoint; + use reth_tasks::TokioTaskExecutor; + use std::{collections::VecDeque, future::poll_fn, sync::Arc}; + + struct TestHarness { + pipeline_sync: PipelineSync>>, + tip: B256, + } + + impl TestHarness { + fn new(total_blocks: usize, pipeline_done_after: u64) -> Self { + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .paris_activated() + .build(), + ); + + // force the pipeline to be "done" after `pipeline_done_after` blocks + let pipeline = TestPipelineBuilder::new() + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(BlockNumber::from(pipeline_done_after)), + done: true, + })])) + .build(chain_spec); + + let pipeline_sync = PipelineSync::new(pipeline, 
Box::::default()); + let client = TestFullBlockClient::default(); + let header = Header { + base_fee_per_gas: Some(7), + gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, + ..Default::default() + } + .seal_slow(); + insert_headers_into_client(&client, header, 0..total_blocks); + + let tip = client.highest_block().expect("there should be blocks here").hash(); + + Self { pipeline_sync, tip } + } + } + + #[tokio::test] + async fn pipeline_started_and_finished() { + const TOTAL_BLOCKS: usize = 10; + const PIPELINE_DONE_AFTER: u64 = 5; + let TestHarness { mut pipeline_sync, tip } = + TestHarness::new(TOTAL_BLOCKS, PIPELINE_DONE_AFTER); + + let sync_future = poll_fn(|cx| pipeline_sync.poll(cx)); + let next_event = poll!(sync_future); + + // sync target not set, pipeline not started + assert_matches!(next_event, Poll::Pending); + + pipeline_sync.on_action(BackfillAction::Start(PipelineTarget::Sync(tip))); + + let sync_future = poll_fn(|cx| pipeline_sync.poll(cx)); + let next_event = poll!(sync_future); + + // sync target set, pipeline started + assert_matches!(next_event, Poll::Ready(BackfillEvent::Started(target)) => { + assert_eq!(target.sync_target().unwrap(), tip); + }); + + // the next event should be the pipeline finishing in a good state + let sync_future = poll_fn(|cx| pipeline_sync.poll(cx)); + let next_ready = sync_future.await; + assert_matches!(next_ready, BackfillEvent::Finished(result) => { + assert_matches!(result, Ok(control_flow) => assert_eq!(control_flow, ControlFlow::Continue { block_number: PIPELINE_DONE_AFTER })); + }); + } +} diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs new file mode 100644 index 000000000000..e3f764beab6b --- /dev/null +++ b/crates/engine/tree/src/chain.rs @@ -0,0 +1,218 @@ +use crate::backfill::{BackfillAction, BackfillEvent, BackfillSync}; +use futures::Stream; +use reth_stages_api::PipelineTarget; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +/// The type that drives the chain forward. 
+/// +/// A state machine that orchestrates the components responsible for advancing the chain +/// +/// +/// ## Control flow +/// +/// The [`ChainOrchestrator`] is responsible for controlling the backfill sync and additional hooks. +/// It polls the given `handler`, which is responsible for advancing the chain, how is up to the +/// handler. However, due to database restrictions (e.g. exclusive write access), following +/// invariants apply: +/// - If the handler requests a backfill run (e.g. [`BackfillAction::Start`]), the handler must +/// ensure that while the backfill sync is running, no other write access is granted. +/// - At any time the [`ChainOrchestrator`] can request exclusive write access to the database +/// (e.g. if pruning is required), but will not do so until the handler has acknowledged the +/// request for write access. +/// +/// The [`ChainOrchestrator`] polls the [`ChainHandler`] to advance the chain and handles the +/// emitted events. Requests and events are passed to the [`ChainHandler`] via +/// [`ChainHandler::on_event`]. +#[must_use = "Stream does nothing unless polled"] +#[derive(Debug)] +pub struct ChainOrchestrator +where + T: ChainHandler, + P: BackfillSync, +{ + /// The handler for advancing the chain. + handler: T, + /// Controls backfill sync. + backfill_sync: P, +} + +impl ChainOrchestrator +where + T: ChainHandler + Unpin, + P: BackfillSync + Unpin, +{ + /// Creates a new [`ChainOrchestrator`] with the given handler and backfill sync. + pub const fn new(handler: T, backfill_sync: P) -> Self { + Self { handler, backfill_sync } + } + + /// Returns the handler + pub const fn handler(&self) -> &T { + &self.handler + } + + /// Returns a mutable reference to the handler + pub fn handler_mut(&mut self) -> &mut T { + &mut self.handler + } + + /// Internal function used to advance the chain. + /// + /// Polls the `ChainOrchestrator` for the next event. 
+ #[tracing::instrument(level = "debug", name = "ChainOrchestrator::poll", skip(self, cx))] + fn poll_next_event(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + // This loop polls the components + // + // 1. Polls the backfill sync to completion, if active. + // 2. Advances the chain by polling the handler. + 'outer: loop { + // try to poll the backfill sync to completion, if active + match this.backfill_sync.poll(cx) { + Poll::Ready(backfill_sync_event) => match backfill_sync_event { + BackfillEvent::Idle => {} + BackfillEvent::Started(_) => { + // notify handler that backfill sync started + this.handler.on_event(FromOrchestrator::BackfillSyncStarted); + return Poll::Ready(ChainEvent::BackfillSyncStarted); + } + BackfillEvent::Finished(res) => { + return match res { + Ok(event) => { + tracing::debug!(?event, "backfill sync finished"); + // notify handler that backfill sync finished + this.handler.on_event(FromOrchestrator::BackfillSyncFinished); + Poll::Ready(ChainEvent::BackfillSyncFinished) + } + Err(err) => { + tracing::error!( %err, "backfill sync failed"); + Poll::Ready(ChainEvent::FatalError) + } + } + } + BackfillEvent::TaskDropped(err) => { + tracing::error!( %err, "backfill sync task dropped"); + return Poll::Ready(ChainEvent::FatalError); + } + }, + Poll::Pending => {} + } + + // poll the handler for the next event + match this.handler.poll(cx) { + Poll::Ready(handler_event) => { + match handler_event { + HandlerEvent::BackfillSync(target) => { + // trigger backfill sync and start polling it + this.backfill_sync.on_action(BackfillAction::Start(target)); + continue 'outer + } + HandlerEvent::Event(ev) => { + // bubble up the event + return Poll::Ready(ChainEvent::Handler(ev)); + } + } + } + Poll::Pending => { + // no more events to process + break 'outer + } + } + } + + Poll::Pending + } +} + +impl Stream for ChainOrchestrator +where + T: ChainHandler + Unpin, + P: BackfillSync + Unpin, +{ + type Item = ChainEvent; 
+ + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.as_mut().poll_next_event(cx).map(Some) + } +} + +/// Represents the sync mode the chain is operating in. +#[derive(Debug, Default)] +enum SyncMode { + #[default] + Handler, + Backfill, +} + +/// Event emitted by the [`ChainOrchestrator`] +/// +/// These are meant to be used for observability and debugging purposes. +#[derive(Debug)] +pub enum ChainEvent { + /// Backfill sync started + BackfillSyncStarted, + /// Backfill sync finished + BackfillSyncFinished, + /// Fatal error + FatalError, + /// Event emitted by the handler + Handler(T), +} + +/// A trait that advances the chain by handling actions. +/// +/// This is intended to be implement the chain consensus logic, for example `engine` API. +pub trait ChainHandler: Send + Sync { + /// Event generated by this handler that orchestrator can bubble up; + type Event: Send; + + /// Informs the handler about an event from the [`ChainOrchestrator`]. + fn on_event(&mut self, event: FromOrchestrator); + + /// Polls for actions that [`ChainOrchestrator`] should handle. + fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; +} + +/// Events/Requests that the [`ChainHandler`] can emit to the [`ChainOrchestrator`]. +#[derive(Clone, Debug)] +pub enum HandlerEvent { + /// Request to start a backfill sync + BackfillSync(PipelineTarget), + /// Other event emitted by the handler + Event(T), +} + +/// Internal events issued by the [`ChainOrchestrator`]. +#[derive(Clone, Debug)] +pub enum FromOrchestrator { + /// Invoked when backfill sync finished + BackfillSyncFinished, + /// Invoked when backfill sync started + BackfillSyncStarted, +} + +/// Represents the state of the chain. +#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] +pub enum OrchestratorState { + /// Orchestrator has exclusive write access to the database. + BackfillSyncActive, + /// Node is actively processing the chain. 
+ #[default] + Idle, +} + +impl OrchestratorState { + /// Returns `true` if the state is [`OrchestratorState::BackfillSyncActive`]. + pub const fn is_backfill_sync_active(&self) -> bool { + matches!(self, Self::BackfillSyncActive) + } + + /// Returns `true` if the state is [`OrchestratorState::Idle`]. + pub const fn is_idle(&self) -> bool { + matches!(self, Self::Idle) + } +} diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs new file mode 100644 index 000000000000..b8ebb8415c8d --- /dev/null +++ b/crates/engine/tree/src/download.rs @@ -0,0 +1,414 @@ +//! Handler that can download blocks on demand (e.g. from the network). + +use crate::{engine::DownloadRequest, metrics::BlockDownloaderMetrics}; +use futures::FutureExt; +use reth_consensus::Consensus; +use reth_network_p2p::{ + bodies::client::BodiesClient, + full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, + headers::client::HeadersClient, +}; +use reth_primitives::{SealedBlock, SealedBlockWithSenders, B256}; +use std::{ + cmp::{Ordering, Reverse}, + collections::{binary_heap::PeekMut, BinaryHeap, HashSet}, + sync::Arc, + task::{Context, Poll}, +}; +use tracing::trace; + +/// A trait that can download blocks on demand. +pub trait BlockDownloader: Send + Sync { + /// Handle an action. + fn on_action(&mut self, action: DownloadAction); + + /// Advance in progress requests if any + fn poll(&mut self, cx: &mut Context<'_>) -> Poll; +} + +/// Actions that can be performed by the block downloader. +#[derive(Debug)] +pub enum DownloadAction { + /// Stop downloading blocks. + Clear, + /// Download given blocks + Download(DownloadRequest), +} + +/// Outcome of downloaded blocks. +#[derive(Debug)] +pub enum DownloadOutcome { + /// Downloaded blocks. + Blocks(Vec), +} + +/// Basic [`BlockDownloader`]. 
+pub struct BasicBlockDownloader +where + Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, +{ + /// A downloader that can download full blocks from the network. + full_block_client: FullBlockClient, + /// In-flight full block requests in progress. + inflight_full_block_requests: Vec>, + /// In-flight full block _range_ requests in progress. + inflight_block_range_requests: Vec>, + /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for + /// ordering. This means the blocks will be popped from the heap with ascending block numbers. + set_buffered_blocks: BinaryHeap>, + /// Engine download metrics. + metrics: BlockDownloaderMetrics, +} + +impl BasicBlockDownloader +where + Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, +{ + /// Create a new instance + pub fn new(client: Client, consensus: Arc) -> Self { + Self { + full_block_client: FullBlockClient::new(client, consensus), + inflight_full_block_requests: Vec::new(), + inflight_block_range_requests: Vec::new(), + set_buffered_blocks: BinaryHeap::new(), + metrics: BlockDownloaderMetrics::default(), + } + } + + /// Clears the stored inflight requests. + fn clear(&mut self) { + self.inflight_full_block_requests.clear(); + self.inflight_block_range_requests.clear(); + self.set_buffered_blocks.clear(); + self.update_block_download_metrics(); + } + + /// Processes a download request. + fn download(&mut self, request: DownloadRequest) { + match request { + DownloadRequest::BlockSet(hashes) => self.download_block_set(hashes), + DownloadRequest::BlockRange(hash, count) => self.download_block_range(hash, count), + } + } + + /// Processes a block set download request. + fn download_block_set(&mut self, hashes: HashSet) { + for hash in hashes { + self.download_full_block(hash); + } + } + + /// Processes a block range download request. 
+ fn download_block_range(&mut self, hash: B256, count: u64) { + if count == 1 { + self.download_full_block(hash); + } else { + trace!( + target: "consensus::engine", + ?hash, + ?count, + "start downloading full block range." + ); + + let request = self.full_block_client.get_full_block_range(hash, count); + self.inflight_block_range_requests.push(request); + } + } + + /// Starts requesting a full block from the network. + /// + /// Returns `true` if the request was started, `false` if there's already a request for the + /// given hash. + fn download_full_block(&mut self, hash: B256) -> bool { + if self.is_inflight_request(hash) { + return false + } + trace!( + target: "consensus::engine::sync", + ?hash, + "Start downloading full block" + ); + + let request = self.full_block_client.get_full_block(hash); + self.inflight_full_block_requests.push(request); + + self.update_block_download_metrics(); + + true + } + + /// Returns true if there's already a request for the given hash. + fn is_inflight_request(&self, hash: B256) -> bool { + self.inflight_full_block_requests.iter().any(|req| *req.hash() == hash) + } + + /// Sets the metrics for the active downloads + fn update_block_download_metrics(&self) { + self.metrics.active_block_downloads.set(self.inflight_full_block_requests.len() as f64); + // TODO: full block range metrics + } +} + +impl BlockDownloader for BasicBlockDownloader +where + Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, +{ + /// Handles incoming download actions. + fn on_action(&mut self, action: DownloadAction) { + match action { + DownloadAction::Clear => self.clear(), + DownloadAction::Download(request) => self.download(request), + } + } + + /// Advances the download process. 
+ fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + // advance all full block requests + for idx in (0..self.inflight_full_block_requests.len()).rev() { + let mut request = self.inflight_full_block_requests.swap_remove(idx); + if let Poll::Ready(block) = request.poll_unpin(cx) { + trace!(target: "consensus::engine", block=?block.num_hash(), "Received single full block, buffering"); + self.set_buffered_blocks.push(Reverse(block.into())); + } else { + // still pending + self.inflight_full_block_requests.push(request); + } + } + + // advance all full block range requests + for idx in (0..self.inflight_block_range_requests.len()).rev() { + let mut request = self.inflight_block_range_requests.swap_remove(idx); + if let Poll::Ready(blocks) = request.poll_unpin(cx) { + trace!(target: "consensus::engine", len=?blocks.len(), first=?blocks.first().map(|b| b.num_hash()), last=?blocks.last().map(|b| b.num_hash()), "Received full block range, buffering"); + self.set_buffered_blocks.extend( + blocks + .into_iter() + .map(|b| { + let senders = b.senders().unwrap_or_default(); + OrderedSealedBlockWithSenders(SealedBlockWithSenders { + block: b, + senders, + }) + }) + .map(Reverse), + ); + } else { + // still pending + self.inflight_block_range_requests.push(request); + } + } + + self.update_block_download_metrics(); + + if self.set_buffered_blocks.is_empty() { + return Poll::Pending; + } + + // drain all unique element of the block buffer if there are any + let mut downloaded_blocks: Vec = + Vec::with_capacity(self.set_buffered_blocks.len()); + while let Some(block) = self.set_buffered_blocks.pop() { + // peek ahead and pop duplicates + while let Some(peek) = self.set_buffered_blocks.peek_mut() { + if peek.0 .0.hash() == block.0 .0.hash() { + PeekMut::pop(peek); + } else { + break + } + } + downloaded_blocks.push(block.0.into()); + } + Poll::Ready(DownloadOutcome::Blocks(downloaded_blocks)) + } +} + +/// A wrapper type around [`SealedBlockWithSenders`] that implements the [Ord] 
+/// trait by block number. +#[derive(Debug, Clone, PartialEq, Eq)] +struct OrderedSealedBlockWithSenders(SealedBlockWithSenders); + +impl PartialOrd for OrderedSealedBlockWithSenders { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for OrderedSealedBlockWithSenders { + fn cmp(&self, other: &Self) -> Ordering { + self.0.number.cmp(&other.0.number) + } +} + +impl From for OrderedSealedBlockWithSenders { + fn from(block: SealedBlock) -> Self { + let senders = block.senders().unwrap_or_default(); + Self(SealedBlockWithSenders { block, senders }) + } +} + +impl From for SealedBlockWithSenders { + fn from(value: OrderedSealedBlockWithSenders) -> Self { + let senders = value.0.senders; + Self { block: value.0.block, senders } + } +} + +/// A [`BlockDownloader`] that does nothing. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct NoopBlockDownloader; + +impl BlockDownloader for NoopBlockDownloader { + fn on_action(&mut self, _event: DownloadAction) {} + + fn poll(&mut self, _cx: &mut Context<'_>) -> Poll { + Poll::Pending + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::insert_headers_into_client; + use assert_matches::assert_matches; + use reth_beacon_consensus::EthBeaconConsensus; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_network_p2p::test_utils::TestFullBlockClient; + use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, Header}; + use std::{future::poll_fn, sync::Arc}; + + struct TestHarness { + block_downloader: BasicBlockDownloader, + client: TestFullBlockClient, + } + + impl TestHarness { + fn new(total_blocks: usize) -> Self { + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .paris_activated() + .build(), + ); + + let client = TestFullBlockClient::default(); + let header = Header { + base_fee_per_gas: Some(7), + gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, + ..Default::default() + } + 
.seal_slow(); + + insert_headers_into_client(&client, header, 0..total_blocks); + let consensus = Arc::new(EthBeaconConsensus::new(chain_spec)); + + let block_downloader = BasicBlockDownloader::new(client.clone(), consensus); + Self { block_downloader, client } + } + } + + #[tokio::test] + async fn block_downloader_range_request() { + const TOTAL_BLOCKS: usize = 10; + let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS); + let tip = client.highest_block().expect("there should be blocks here"); + + // send block range download request + block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockRange( + tip.hash(), + tip.number, + ))); + + // ensure we have one in flight range request + assert_eq!(block_downloader.inflight_block_range_requests.len(), 1); + + // ensure the range request is made correctly + let first_req = block_downloader.inflight_block_range_requests.first().unwrap(); + assert_eq!(first_req.start_hash(), tip.hash()); + assert_eq!(first_req.count(), tip.number); + + // poll downloader + let sync_future = poll_fn(|cx| block_downloader.poll(cx)); + let next_ready = sync_future.await; + + assert_matches!(next_ready, DownloadOutcome::Blocks(blocks) => { + // ensure all blocks were obtained + assert_eq!(blocks.len(), TOTAL_BLOCKS); + + // ensure they are in ascending order + for num in 1..=TOTAL_BLOCKS { + assert_eq!(blocks[num-1].number, num as u64); + } + }); + } + + #[tokio::test] + async fn block_downloader_set_request() { + const TOTAL_BLOCKS: usize = 2; + let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS); + + let tip = client.highest_block().expect("there should be blocks here"); + + // send block set download request + block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockSet( + HashSet::from([tip.hash(), tip.parent_hash]), + ))); + + // ensure we have TOTAL_BLOCKS in flight full block request + 
assert_eq!(block_downloader.inflight_full_block_requests.len(), TOTAL_BLOCKS); + + // poll downloader + let sync_future = poll_fn(|cx| block_downloader.poll(cx)); + let next_ready = sync_future.await; + + assert_matches!(next_ready, DownloadOutcome::Blocks(blocks) => { + // ensure all blocks were obtained + assert_eq!(blocks.len(), TOTAL_BLOCKS); + + // ensure they are in ascending order + for num in 1..=TOTAL_BLOCKS { + assert_eq!(blocks[num-1].number, num as u64); + } + }); + } + + #[tokio::test] + async fn block_downloader_clear_request() { + const TOTAL_BLOCKS: usize = 10; + let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS); + + let tip = client.highest_block().expect("there should be blocks here"); + + // send block range download request + block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockRange( + tip.hash(), + tip.number, + ))); + + // send block set download request + let download_set = HashSet::from([tip.hash(), tip.parent_hash]); + block_downloader + .on_action(DownloadAction::Download(DownloadRequest::BlockSet(download_set.clone()))); + + // ensure we have one in flight range request + assert_eq!(block_downloader.inflight_block_range_requests.len(), 1); + + // ensure the range request is made correctly + let first_req = block_downloader.inflight_block_range_requests.first().unwrap(); + assert_eq!(first_req.start_hash(), tip.hash()); + assert_eq!(first_req.count(), tip.number); + + // ensure we have download_set.len() in flight full block request + assert_eq!(block_downloader.inflight_full_block_requests.len(), download_set.len()); + + // send clear request + block_downloader.on_action(DownloadAction::Clear); + + // ensure we have no in flight range request + assert_eq!(block_downloader.inflight_block_range_requests.len(), 0); + + // ensure we have no in flight full block request + assert_eq!(block_downloader.inflight_full_block_requests.len(), 0); + } +} diff --git a/crates/engine/tree/src/engine.rs 
b/crates/engine/tree/src/engine.rs new file mode 100644 index 000000000000..9b965e892268 --- /dev/null +++ b/crates/engine/tree/src/engine.rs @@ -0,0 +1,226 @@ +//! An engine API handler for the chain. + +use crate::{ + chain::{ChainHandler, FromOrchestrator, HandlerEvent}, + download::{BlockDownloader, DownloadAction, DownloadOutcome}, + tree::TreeEvent, +}; +use futures::{Stream, StreamExt}; +use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use reth_primitives::{SealedBlockWithSenders, B256}; +use std::{ + collections::HashSet, + sync::mpsc::Sender, + task::{Context, Poll}, +}; +use tokio::sync::mpsc::UnboundedReceiver; + +/// Advances the chain based on incoming requests. +/// +/// This is a general purpose request handler with network access. +/// This type listens for incoming messages and processes them via the configured request handler. +/// +/// ## Overview +/// +/// This type is an orchestrator for incoming messages and responsible for delegating requests +/// received from the CL to the handler. +/// +/// It is responsible for handling the following: +/// - Downloading blocks on demand from the network if requested by the [`EngineApiRequestHandler`]. +/// +/// The core logic is part of the [`EngineRequestHandler`], which is responsible for processing the +/// incoming requests. +#[derive(Debug)] +pub struct EngineHandler { + /// Processes requests. + /// + /// This type is responsible for processing incoming requests. + handler: T, + /// Receiver for incoming requests that need to be processed. + incoming_requests: S, + /// A downloader to download blocks on demand. + downloader: D, +} + +impl EngineHandler { + /// Creates a new [`EngineHandler`] with the given handler and downloader. 
+ pub const fn new(handler: T, downloader: D, incoming_requests: S) -> Self + where + T: EngineRequestHandler, + { + Self { handler, incoming_requests, downloader } + } +} + +impl ChainHandler for EngineHandler +where + T: EngineRequestHandler, + S: Stream + Send + Sync + Unpin + 'static, + D: BlockDownloader, +{ + type Event = T::Event; + + fn on_event(&mut self, event: FromOrchestrator) { + // delegate event to the handler + self.handler.on_event(event.into()); + } + + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { + loop { + // drain the handler first + while let Poll::Ready(ev) = self.handler.poll(cx) { + match ev { + RequestHandlerEvent::Idle => break, + RequestHandlerEvent::HandlerEvent(ev) => { + return match ev { + HandlerEvent::BackfillSync(target) => { + // bubble up backfill sync request request + self.downloader.on_action(DownloadAction::Clear); + Poll::Ready(HandlerEvent::BackfillSync(target)) + } + HandlerEvent::Event(ev) => { + // bubble up the event + Poll::Ready(HandlerEvent::Event(ev)) + } + } + } + RequestHandlerEvent::Download(req) => { + // delegate download request to the downloader + self.downloader.on_action(DownloadAction::Download(req)); + } + } + } + + // pop the next incoming request + if let Poll::Ready(Some(req)) = self.incoming_requests.poll_next_unpin(cx) { + // and delegate the request to the handler + self.handler.on_event(FromEngine::Request(req)); + // skip downloading in this iteration to allow the handler to process the request + continue + } + + // advance the downloader + if let Poll::Ready(DownloadOutcome::Blocks(blocks)) = self.downloader.poll(cx) { + // delegate the downloaded blocks to the handler + self.handler.on_event(FromEngine::DownloadedBlocks(blocks)); + continue + } + + return Poll::Pending + } + } +} + +/// A type that processes incoming requests (e.g. 
requests from the consensus layer, engine API) +pub trait EngineRequestHandler: Send + Sync { + /// Even type this handler can emit + type Event: Send; + /// The request type this handler can process. + type Request; + + /// Informs the handler about an event from the [`EngineHandler`]. + fn on_event(&mut self, event: FromEngine); + + /// Advances the handler. + fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; +} + +/// An [`EngineRequestHandler`] that processes engine API requests by delegating to an execution +/// task. +/// +/// This type is responsible for advancing the chain during live sync (following the tip of the +/// chain). +/// +/// It advances the chain based on received engine API requests by delegating them to the tree +/// executor. +/// +/// There are two types of requests that can be processed: +/// +/// - `on_new_payload`: Executes the payload and inserts it into the tree. These are allowed to be +/// processed concurrently. +/// - `on_forkchoice_updated`: Updates the fork choice based on the new head. These require write +/// access to the database and are skipped if the handler can't acquire exclusive access to the +/// database. +/// +/// In case required blocks are missing, the handler will request them from the network, by emitting +/// a download request upstream. +#[derive(Debug)] +pub struct EngineApiRequestHandler { + /// channel to send messages to the tree to execute the payload. + to_tree: Sender>>, + /// channel to receive messages from the tree. 
+ from_tree: UnboundedReceiver, +} + +impl EngineApiRequestHandler +where + T: EngineTypes, +{ + pub const fn new( + to_tree: Sender>>, + from_tree: UnboundedReceiver, + ) -> Self { + Self { to_tree, from_tree } + } +} + +impl EngineRequestHandler for EngineApiRequestHandler +where + T: EngineTypes, +{ + type Event = EngineApiEvent; + type Request = BeaconEngineMessage; + + fn on_event(&mut self, event: FromEngine) { + // delegate to the tree + let _ = self.to_tree.send(event); + } + + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { + todo!("poll tree") + } +} + +/// Events emitted by the engine API handler. +#[derive(Debug)] +pub enum EngineApiEvent { + /// Bubbled from tree. + FromTree(TreeEvent), +} + +#[derive(Debug)] +pub enum FromEngine { + /// Event from the top level orchestrator. + Event(FromOrchestrator), + /// Request from the engine + Request(Req), + /// Downloaded blocks from the network. + DownloadedBlocks(Vec), +} + +impl From for FromEngine { + fn from(event: FromOrchestrator) -> Self { + Self::Event(event) + } +} + +/// Requests produced by a [`EngineRequestHandler`]. +#[derive(Debug)] +pub enum RequestHandlerEvent { + /// The handler is idle. + Idle, + /// An event emitted by the handler. + HandlerEvent(HandlerEvent), + /// Request to download blocks. + Download(DownloadRequest), +} + +/// A request to download blocks from the network. +#[derive(Debug)] +pub enum DownloadRequest { + /// Download the given set of blocks. + BlockSet(HashSet), + /// Download the given range of blocks. + BlockRange(B256, u64), +} diff --git a/crates/engine/tree/src/lib.rs b/crates/engine/tree/src/lib.rs new file mode 100644 index 000000000000..8f40119b2cc0 --- /dev/null +++ b/crates/engine/tree/src/lib.rs @@ -0,0 +1,35 @@ +//! This crate includes the core components for advancing a reth chain. +//! +//! ## Feature Flags +//! +//! 
- `test-utils`: Export utilities for testing + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// #![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![allow(missing_docs, dead_code, missing_debug_implementations, unused_variables)] // TODO rm + +/// Re-export of the blockchain tree API. +pub use reth_blockchain_tree_api::*; + +/// Support for backfill sync mode. +pub mod backfill; +/// The type that drives the chain forward. +pub mod chain; +/// Support for downloading blocks on demand for live sync. +pub mod download; +/// Engine Api chain handler support. +pub mod engine; +/// Metrics support. +pub mod metrics; +/// The background writer task for batch db writes. +pub mod persistence; +/// Support for interacting with the blockchain tree. +pub mod tree; + +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; diff --git a/crates/engine/tree/src/metrics.rs b/crates/engine/tree/src/metrics.rs new file mode 100644 index 000000000000..9579affe690f --- /dev/null +++ b/crates/engine/tree/src/metrics.rs @@ -0,0 +1,9 @@ +use reth_metrics::{metrics::Gauge, Metrics}; + +/// Metrics for the `BasicBlockDownloader`. +#[derive(Metrics)] +#[metrics(scope = "consensus.engine.beacon")] +pub(crate) struct BlockDownloaderMetrics { + /// How many blocks are currently being downloaded. 
+ pub(crate) active_block_downloads: Gauge, +} diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs new file mode 100644 index 000000000000..23b3a5827c59 --- /dev/null +++ b/crates/engine/tree/src/persistence.rs @@ -0,0 +1,266 @@ +#![allow(dead_code)] + +use crate::tree::ExecutedBlock; +use reth_db::database::Database; +use reth_errors::ProviderResult; +use reth_primitives::B256; +use reth_provider::{ + bundle_state::HashedStateChanges, BlockWriter, HistoryWriter, OriginalValuesKnown, + ProviderFactory, StageCheckpointWriter, StateWriter, +}; +use reth_prune::{PruneProgress, Pruner}; +use std::sync::mpsc::{Receiver, SendError, Sender}; +use tokio::sync::oneshot; +use tracing::debug; + +/// Writes parts of reth's in memory tree state to the database. +/// +/// This is meant to be a spawned task that listens for various incoming persistence operations, +/// performing those actions on disk, and returning the result in a channel. +/// +/// There are two types of operations this task can perform: +/// - Writing executed blocks to disk, returning the hash of the latest block that was inserted. +/// - Removing blocks from disk, returning the removed blocks. +/// +/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs +/// blocking database operations in an endless loop. 
+#[derive(Debug)] +pub struct Persistence { + /// The db / static file provider to use + provider: ProviderFactory, + /// Incoming requests to persist stuff + incoming: Receiver, + /// The pruner + pruner: Pruner, +} + +impl Persistence { + /// Create a new persistence task + const fn new( + provider: ProviderFactory, + incoming: Receiver, + pruner: Pruner, + ) -> Self { + Self { provider, incoming, pruner } + } + + /// Writes the cloned tree state to the database + fn write(&self, blocks: Vec) -> ProviderResult<()> { + let provider_rw = self.provider.provider_rw()?; + + if blocks.is_empty() { + debug!(target: "tree::persistence", "Attempted to write empty block range"); + return Ok(()) + } + + let first_number = blocks.first().unwrap().block().number; + + let last = blocks.last().unwrap().block(); + let last_block_number = last.number; + + // TODO: remove all the clones and do performant / batched writes for each type of object + // instead of a loop over all blocks, + // meaning: + // * blocks + // * state + // * hashed state + // * trie updates (cannot naively extend, need helper) + // * indices (already done basically) + // Insert the blocks + for block in blocks { + let sealed_block = + block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); + provider_rw.insert_block(sealed_block)?; + + // Write state and changesets to the database. + // Must be written after blocks because of the receipt lookup. 
+ let execution_outcome = block.execution_outcome().clone(); + execution_outcome.write_to_storage( + provider_rw.tx_ref(), + None, + OriginalValuesKnown::No, + )?; + + // insert hashes and intermediate merkle nodes + { + let trie_updates = block.trie_updates().clone(); + let hashed_state = block.hashed_state(); + HashedStateChanges(hashed_state.clone()).write_to_db(provider_rw.tx_ref())?; + trie_updates.write_to_database(provider_rw.tx_ref())?; + } + + // update history indices + provider_rw.update_history_indices(first_number..=last_block_number)?; + + // Update pipeline progress + provider_rw.update_pipeline_stages(last_block_number, false)?; + } + + debug!(target: "tree::persistence", range = ?first_number..=last_block_number, "Appended blocks"); + + Ok(()) + } + + /// Removes the blocks above the give block number from the database, returning them. + fn remove_blocks_above(&self, _block_number: u64) -> Vec { + todo!("implement this") + } + + /// Prunes block data before the given block hash according to the configured prune + /// configuration. + fn prune_before(&mut self, block_num: u64) -> PruneProgress { + // TODO: doing this properly depends on pruner segment changes + self.pruner.run(block_num).expect("todo: handle errors") + } + + /// Removes static file related data from the database, depending on the current block height in + /// existing static files. + fn clean_static_file_duplicates(&self) { + todo!("implement this") + } +} + +impl Persistence +where + DB: Database + 'static, +{ + /// Create a new persistence task, spawning it, and returning a [`PersistenceHandle`]. 
+ fn spawn_new(provider: ProviderFactory, pruner: Pruner) -> PersistenceHandle { + let (tx, rx) = std::sync::mpsc::channel(); + let task = Self::new(provider, rx, pruner); + std::thread::Builder::new() + .name("Persistence Task".to_string()) + .spawn(|| task.run()) + .unwrap(); + + PersistenceHandle::new(tx) + } +} + +impl Persistence +where + DB: Database, +{ + /// This is the main loop, that will listen to persistence events and perform the requested + /// database actions + fn run(mut self) { + // If the receiver errors then senders have disconnected, so the loop should then end. + while let Ok(action) = self.incoming.recv() { + match action { + PersistenceAction::RemoveBlocksAbove((new_tip_num, sender)) => { + let output = self.remove_blocks_above(new_tip_num); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(output); + } + PersistenceAction::SaveBlocks((blocks, sender)) => { + if blocks.is_empty() { + todo!("return error or something"); + } + let last_block_hash = blocks.last().unwrap().block().hash(); + self.write(blocks).unwrap(); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(last_block_hash); + } + PersistenceAction::PruneBefore((block_num, sender)) => { + let res = self.prune_before(block_num); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(res); + } + PersistenceAction::CleanStaticFileDuplicates(sender) => { + self.clean_static_file_duplicates(); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(()); + } + } + } + } +} + +/// A signal to the persistence task that part of the tree state can be persisted. +#[derive(Debug)] +pub enum PersistenceAction { + /// The section of tree state that should be persisted. These blocks are expected in order of + /// increasing block number. 
+ SaveBlocks((Vec, oneshot::Sender)), + + /// Removes the blocks above the given block number from the database. + RemoveBlocksAbove((u64, oneshot::Sender>)), + + /// Prune associated block data before the given block number, according to already-configured + /// prune modes. + PruneBefore((u64, oneshot::Sender)), + + /// Trigger a read of static file data, and delete data depending on the highest block in each + /// static file segment. + CleanStaticFileDuplicates(oneshot::Sender<()>), +} + +/// A handle to the persistence task +#[derive(Debug, Clone)] +pub struct PersistenceHandle { + /// The channel used to communicate with the persistence task + sender: Sender, +} + +impl PersistenceHandle { + /// Create a new [`PersistenceHandle`] from a [`Sender`]. + pub const fn new(sender: Sender) -> Self { + Self { sender } + } + + /// Sends a specific [`PersistenceAction`] in the contained channel. The caller is responsible + /// for creating any channels for the given action. + pub fn send_action( + &self, + action: PersistenceAction, + ) -> Result<(), SendError> { + self.sender.send(action) + } + + /// Tells the persistence task to save a certain list of finalized blocks. The blocks are + /// assumed to be ordered by block number. + /// + /// This returns the latest hash that has been saved, allowing removal of that block and any + /// previous blocks from in-memory data structures. + pub async fn save_blocks(&self, blocks: Vec) -> B256 { + let (tx, rx) = oneshot::channel(); + self.sender + .send(PersistenceAction::SaveBlocks((blocks, tx))) + .expect("should be able to send"); + rx.await.expect("todo: err handling") + } + + /// Tells the persistence task to remove blocks above a certain block number. The removed blocks + /// are returned by the task. 
+ pub async fn remove_blocks_above(&self, block_num: u64) -> Vec { + let (tx, rx) = oneshot::channel(); + self.sender + .send(PersistenceAction::RemoveBlocksAbove((block_num, tx))) + .expect("should be able to send"); + rx.await.expect("todo: err handling") + } + + /// Tells the persistence task to remove block data before the given hash, according to the + /// configured prune config. + pub async fn prune_before(&self, block_num: u64) -> PruneProgress { + let (tx, rx) = oneshot::channel(); + self.sender + .send(PersistenceAction::PruneBefore((block_num, tx))) + .expect("should be able to send"); + rx.await.expect("todo: err handling") + } + + /// Tells the persistence task to read static file data, and delete data depending on the + /// highest block in each static file segment. + pub async fn clean_static_file_duplicates(&self) { + let (tx, rx) = oneshot::channel(); + self.sender + .send(PersistenceAction::CleanStaticFileDuplicates(tx)) + .expect("should be able to send"); + rx.await.expect("todo: err handling") + } +} diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs new file mode 100644 index 000000000000..0a5fbd5ad560 --- /dev/null +++ b/crates/engine/tree/src/test_utils.rs @@ -0,0 +1,77 @@ +use reth_chainspec::ChainSpec; +use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; +use reth_network_p2p::test_utils::TestFullBlockClient; +use reth_primitives::{BlockBody, SealedHeader, B256}; +use reth_provider::{test_utils::create_test_provider_factory_with_chain_spec, ExecutionOutcome}; +use reth_prune_types::PruneModes; +use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; +use reth_stages_api::Pipeline; +use reth_static_file::StaticFileProducer; +use std::{collections::VecDeque, ops::Range, sync::Arc}; +use tokio::sync::watch; + +/// Test pipeline builder. 
+#[derive(Default)] +pub struct TestPipelineBuilder { + pipeline_exec_outputs: VecDeque>, + executor_results: Vec, +} + +impl TestPipelineBuilder { + /// Create a new [`TestPipelineBuilder`]. + pub const fn new() -> Self { + Self { pipeline_exec_outputs: VecDeque::new(), executor_results: Vec::new() } + } + + /// Set the pipeline execution outputs to use for the test consensus engine. + pub fn with_pipeline_exec_outputs( + mut self, + pipeline_exec_outputs: VecDeque>, + ) -> Self { + self.pipeline_exec_outputs = pipeline_exec_outputs; + self + } + + /// Set the executor results to use for the test consensus engine. + #[allow(dead_code)] + pub fn with_executor_results(mut self, executor_results: Vec) -> Self { + self.executor_results = executor_results; + self + } + + /// Builds the pipeline. + pub fn build(self, chain_spec: Arc) -> Pipeline>> { + reth_tracing::init_test_tracing(); + + // Setup pipeline + let (tip_tx, _tip_rx) = watch::channel(B256::default()); + let pipeline = Pipeline::builder() + .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default())) + .with_tip_sender(tip_tx); + + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); + + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + + pipeline.build(provider_factory, static_file_producer) + } +} + +pub(crate) fn insert_headers_into_client( + client: &TestFullBlockClient, + genesis_header: SealedHeader, + range: Range, +) { + let mut sealed_header = genesis_header; + let body = BlockBody::default(); + for _ in range { + let (mut header, hash) = sealed_header.split(); + // update to the next header + header.parent_hash = hash; + header.number += 1; + header.timestamp += 1; + sealed_header = header.seal_slow(); + client.insert(sealed_header.clone(), body.clone()); + } +} diff --git a/crates/engine/tree/src/tree/memory_overlay.rs b/crates/engine/tree/src/tree/memory_overlay.rs new file mode 100644 index 
000000000000..f11eece8e7d0 --- /dev/null +++ b/crates/engine/tree/src/tree/memory_overlay.rs @@ -0,0 +1,135 @@ +use super::ExecutedBlock; +use reth_errors::ProviderResult; +use reth_primitives::{Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256}; +use reth_provider::{ + AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, +}; +use reth_trie::{updates::TrieUpdates, AccountProof}; +use revm::db::BundleState; + +/// A state provider that stores references to in-memory blocks along with their state as well as +/// the historical state provider for fallback lookups. +#[derive(Debug)] +pub struct MemoryOverlayStateProvider { + /// The collection of executed parent blocks. + in_memory: Vec, + /// Historical state provider for state lookups that are not found in in-memory blocks. + historical: H, +} + +impl MemoryOverlayStateProvider { + /// Create new memory overlay state provider. + pub const fn new(in_memory: Vec, historical: H) -> Self { + Self { in_memory, historical } + } +} + +impl BlockHashReader for MemoryOverlayStateProvider +where + H: BlockHashReader, +{ + fn block_hash(&self, number: BlockNumber) -> ProviderResult> { + for block in self.in_memory.iter().rev() { + if block.block.number == number { + return Ok(Some(block.block.hash())) + } + } + + self.historical.block_hash(number) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + let range = start..end; + let mut earliest_block_number = None; + let mut in_memory_hashes = Vec::new(); + for block in self.in_memory.iter().rev() { + if range.contains(&block.block.number) { + in_memory_hashes.insert(0, block.block.hash()); + earliest_block_number = Some(block.block.number); + } + } + + let mut hashes = + self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?; + hashes.append(&mut in_memory_hashes); + Ok(hashes) + } +} + +impl AccountReader for MemoryOverlayStateProvider 
+where + H: AccountReader + Send, +{ + fn basic_account(&self, address: Address) -> ProviderResult> { + for block in self.in_memory.iter().rev() { + if let Some(account) = block.execution_output.account(&address) { + return Ok(account) + } + } + + self.historical.basic_account(address) + } +} + +impl StateRootProvider for MemoryOverlayStateProvider +where + H: StateRootProvider + Send, +{ + fn state_root(&self, bundle_state: &BundleState) -> ProviderResult { + todo!() + } + + fn state_root_with_updates( + &self, + bundle_state: &BundleState, + ) -> ProviderResult<(B256, TrieUpdates)> { + todo!() + } +} + +impl StateProofProvider for MemoryOverlayStateProvider +where + H: StateProofProvider + Send, +{ + fn proof( + &self, + state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult { + todo!() + } +} + +impl StateProvider for MemoryOverlayStateProvider +where + H: StateProvider + Send, +{ + fn storage( + &self, + address: Address, + storage_key: StorageKey, + ) -> ProviderResult> { + for block in self.in_memory.iter().rev() { + if let Some(value) = block.execution_output.storage(&address, storage_key.into()) { + return Ok(Some(value)) + } + } + + self.historical.storage(address, storage_key) + } + + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { + for block in self.in_memory.iter().rev() { + if let Some(contract) = block.execution_output.bytecode(&code_hash) { + return Ok(Some(contract)) + } + } + + self.historical.bytecode_by_hash(code_hash) + } +} diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs new file mode 100644 index 000000000000..8afed31043c0 --- /dev/null +++ b/crates/engine/tree/src/tree/mod.rs @@ -0,0 +1,757 @@ +use crate::{ + backfill::BackfillAction, + chain::FromOrchestrator, + engine::{DownloadRequest, EngineApiEvent, FromEngine}, +}; +use reth_beacon_consensus::{ + BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, OnForkChoiceUpdated, +}; +use 
reth_blockchain_tree::{ + error::InsertBlockErrorKind, BlockAttachment, BlockBuffer, BlockStatus, +}; +use reth_blockchain_tree_api::{error::InsertBlockError, InsertPayloadOk}; +use reth_consensus::{Consensus, PostExecutionInput}; +use reth_engine_primitives::EngineTypes; +use reth_errors::{ConsensusError, ProviderResult}; +use reth_evm::execute::{BlockExecutorProvider, Executor}; +use reth_payload_primitives::PayloadTypes; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{ + Address, Block, BlockNumber, GotExpected, Receipts, Requests, SealedBlock, + SealedBlockWithSenders, B256, U256, +}; +use reth_provider::{ + BlockReader, ExecutionOutcome, StateProvider, StateProviderFactory, StateRootProvider, +}; +use reth_revm::database::StateProviderDatabase; +use reth_rpc_types::{ + engine::{ + CancunPayloadFields, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, + }, + ExecutionPayload, +}; +use reth_trie::{updates::TrieUpdates, HashedPostState}; +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, + sync::{mpsc::Receiver, Arc}, +}; +use tokio::sync::mpsc::UnboundedSender; +use tracing::*; + +mod memory_overlay; +pub use memory_overlay::MemoryOverlayStateProvider; + +/// Represents an executed block stored in-memory. +#[derive(Clone, Debug)] +pub struct ExecutedBlock { + block: Arc, + senders: Arc>, + execution_output: Arc, + hashed_state: Arc, + trie: Arc, +} + +impl ExecutedBlock { + /// Returns a reference to the executed block. + pub(crate) fn block(&self) -> &SealedBlock { + &self.block + } + + /// Returns a reference to the block's senders + pub(crate) fn senders(&self) -> &Vec

{ + &self.senders + } + + /// Returns a reference to the block's execution outcome + pub(crate) fn execution_outcome(&self) -> &ExecutionOutcome { + &self.execution_output + } + + /// Returns a reference to the hashed state result of the execution outcome + pub(crate) fn hashed_state(&self) -> &HashedPostState { + &self.hashed_state + } + + /// Returns a reference to the trie updates for the block + pub(crate) fn trie_updates(&self) -> &TrieUpdates { + &self.trie + } +} + +/// Keeps track of the state of the tree. +#[derive(Debug, Default)] +pub struct TreeState { + /// All executed blocks by hash. + blocks_by_hash: HashMap, + /// Executed blocks grouped by their respective block number. + blocks_by_number: BTreeMap>, +} + +impl TreeState { + fn block_by_hash(&self, hash: B256) -> Option> { + self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) + } + + /// Insert executed block into the state. + fn insert_executed(&mut self, executed: ExecutedBlock) { + self.blocks_by_number.entry(executed.block.number).or_default().push(executed.clone()); + let existing = self.blocks_by_hash.insert(executed.block.hash(), executed); + debug_assert!(existing.is_none(), "inserted duplicate block"); + } + + /// Remove blocks before specified block number. + pub(crate) fn remove_before(&mut self, block_number: BlockNumber) { + while self + .blocks_by_number + .first_key_value() + .map(|entry| entry.0 < &block_number) + .unwrap_or_default() + { + let (_, to_remove) = self.blocks_by_number.pop_first().unwrap(); + for block in to_remove { + let block_hash = block.block.hash(); + let removed = self.blocks_by_hash.remove(&block_hash); + debug_assert!( + removed.is_some(), + "attempted to remove non-existing block {block_hash}" + ); + } + } + } +} + +/// Tracks the state of the engine api internals. +/// +/// This type is shareable. +#[derive(Debug)] +pub struct EngineApiTreeState { + /// Tracks the state of the blockchain tree. 
+ tree_state: TreeState, + /// Tracks the received forkchoice state updates received by the CL. + forkchoice_state_tracker: ForkchoiceStateTracker, + /// Buffer of detached blocks. + buffer: BlockBuffer, + /// Tracks the header of invalid payloads that were rejected by the engine because they're + /// invalid. + invalid_headers: InvalidHeaderCache, +} + +impl EngineApiTreeState { + fn new(block_buffer_limit: u32, max_invalid_header_cache_length: u32) -> Self { + Self { + invalid_headers: InvalidHeaderCache::new(max_invalid_header_cache_length), + buffer: BlockBuffer::new(block_buffer_limit), + tree_state: TreeState::default(), + forkchoice_state_tracker: ForkchoiceStateTracker::default(), + } + } +} + +/// The type responsible for processing engine API requests. +/// +/// TODO: design: should the engine handler functions also accept the response channel or return the +/// result and the caller redirects the response +pub trait EngineApiTreeHandler { + /// The engine type that this handler is for. + type Engine: EngineTypes; + + /// Invoked when previously requested blocks were downloaded. + fn on_downloaded(&mut self, blocks: Vec) -> Option; + + /// When the Consensus layer receives a new block via the consensus gossip protocol, + /// the transactions in the block are sent to the execution layer in the form of a + /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the + /// state in the block header, then passes validation data back to Consensus layer, that + /// adds the block to the head of its own blockchain and attests to it. The block is then + /// broadcast over the consensus p2p network in the form of a "Beacon block". + /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification). 
+ /// + /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and + /// returns an error if an internal error occurred. + fn on_new_payload( + &mut self, + payload: ExecutionPayload, + cancun_fields: Option, + ) -> ProviderResult>; + + /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree + /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid + /// chain. + /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). + /// + /// Returns an error if an internal error occurred like a database error. + fn on_forkchoice_updated( + &mut self, + state: ForkchoiceState, + attrs: Option<::PayloadAttributes>, + ) -> ProviderResult>; +} + +/// The outcome of a tree operation. +#[derive(Debug)] +pub struct TreeOutcome { + /// The outcome of the operation. + pub outcome: T, + /// An optional event to tell the caller to do something. + pub event: Option, +} + +impl TreeOutcome { + /// Create new tree outcome. + pub const fn new(outcome: T) -> Self { + Self { outcome, event: None } + } + + /// Set event on the outcome. + pub fn with_event(mut self, event: TreeEvent) -> Self { + self.event = Some(event); + self + } +} + +/// Events that can be emitted by the [`EngineApiTreeHandler`]. +#[derive(Debug)] +pub enum TreeEvent { + /// Tree action is needed. + TreeAction(TreeAction), + /// Backfill action is needed. + BackfillAction(BackfillAction), + /// Block download is needed. + Download(DownloadRequest), +} + +/// The actions that can be performed on the tree. +#[derive(Debug)] +pub enum TreeAction { + /// Make target canonical. 
+ MakeCanonical(B256), +} + +#[derive(Debug)] +pub struct EngineApiTreeHandlerImpl { + provider: P, + executor_provider: E, + consensus: Arc, + payload_validator: ExecutionPayloadValidator, + state: EngineApiTreeState, + incoming: Receiver>>, + outgoing: UnboundedSender, + /// (tmp) The flag indicating whether the pipeline is active. + is_pipeline_active: bool, + _marker: PhantomData, +} + +impl EngineApiTreeHandlerImpl +where + P: BlockReader + StateProviderFactory + Clone + 'static, + E: BlockExecutorProvider, + T: EngineTypes + 'static, +{ + #[allow(clippy::too_many_arguments)] + fn new( + provider: P, + executor_provider: E, + consensus: Arc, + payload_validator: ExecutionPayloadValidator, + incoming: Receiver>>, + outgoing: UnboundedSender, + state: EngineApiTreeState, + ) -> Self { + Self { + provider, + executor_provider, + consensus, + payload_validator, + incoming, + outgoing, + is_pipeline_active: false, + state, + _marker: PhantomData, + } + } + + #[allow(clippy::too_many_arguments)] + fn spawn_new( + provider: P, + executor_provider: E, + consensus: Arc, + payload_validator: ExecutionPayloadValidator, + incoming: Receiver>>, + state: EngineApiTreeState, + ) -> UnboundedSender { + let (outgoing, rx) = tokio::sync::mpsc::unbounded_channel(); + let task = Self::new( + provider, + executor_provider, + consensus, + payload_validator, + incoming, + outgoing.clone(), + state, + ); + std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap(); + outgoing + } + + fn run(mut self) { + loop { + while let Ok(msg) = self.incoming.recv() { + match msg { + FromEngine::Event(event) => match event { + FromOrchestrator::BackfillSyncFinished => { + todo!() + } + FromOrchestrator::BackfillSyncStarted => { + todo!() + } + }, + FromEngine::Request(request) => match request { + BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { + let output = self.on_forkchoice_updated(state, payload_attrs); + if let Err(err) = 
tx.send(output.map(|o| o.outcome).map_err(Into::into)) + { + error!("Failed to send event: {err:?}"); + } + } + BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { + let output = self.on_new_payload(payload, cancun_fields); + if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { + reth_beacon_consensus::BeaconOnNewPayloadError::Internal(Box::new( + e, + )) + })) { + error!("Failed to send event: {err:?}"); + } + } + BeaconEngineMessage::TransitionConfigurationExchanged => { + todo!() + } + }, + FromEngine::DownloadedBlocks(blocks) => { + if let Some(event) = self.on_downloaded(blocks) { + if let Err(err) = self.outgoing.send(EngineApiEvent::FromTree(event)) { + error!("Failed to send event: {err:?}"); + } + } + } + } + } + } + } + + /// Return block from database or in-memory state by hash. + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + // check database first + let mut block = self.provider.block_by_hash(hash)?; + if block.is_none() { + // Note: it's fine to return the unsealed block because the caller already has + // the hash + block = self + .state + .tree_state + .block_by_hash(hash) + // TODO: clone for compatibility. should we return an Arc here? + .map(|block| block.as_ref().clone().unseal()); + } + Ok(block) + } + + /// Return state provider with reference to in-memory blocks that overlay database state. + fn state_provider( + &self, + hash: B256, + ) -> ProviderResult>> { + let mut in_memory = Vec::new(); + let mut parent_hash = hash; + while let Some(executed) = self.state.tree_state.blocks_by_hash.get(&parent_hash) { + parent_hash = executed.block.parent_hash; + in_memory.insert(0, executed.clone()); + } + + let historical = self.provider.state_by_block_hash(parent_hash)?; + Ok(MemoryOverlayStateProvider::new(in_memory, historical)) + } + + /// Return the parent hash of the lowest buffered ancestor for the requested block, if there + /// are any buffered ancestors. 
If there are no buffered ancestors, and the block itself does + /// not exist in the buffer, this returns the hash that is passed in. + /// + /// Returns the parent hash of the block itself if the block is buffered and has no other + /// buffered ancestors. + fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 { + self.state + .buffer + .lowest_ancestor(&hash) + .map(|block| block.parent_hash) + .unwrap_or_else(|| hash) + } + + /// If validation fails, the response MUST contain the latest valid hash: + /// + /// - The block hash of the ancestor of the invalid payload satisfying the following two + /// conditions: + /// - It is fully validated and deemed VALID + /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID + /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above + /// conditions are satisfied by a `PoW` block. + /// - null if client software cannot determine the ancestor of the invalid payload satisfying + /// the above conditions. + fn latest_valid_hash_for_invalid_payload( + &mut self, + parent_hash: B256, + ) -> ProviderResult> { + // Check if parent exists in side chain or in canonical chain. 
+ if self.block_by_hash(parent_hash)?.is_some() { + return Ok(Some(parent_hash)) + } + + // iterate over ancestors in the invalid cache + // until we encounter the first valid ancestor + let mut current_hash = parent_hash; + let mut current_header = self.state.invalid_headers.get(¤t_hash); + while let Some(header) = current_header { + current_hash = header.parent_hash; + current_header = self.state.invalid_headers.get(¤t_hash); + + // If current_header is None, then the current_hash does not have an invalid + // ancestor in the cache, check its presence in blockchain tree + if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() { + return Ok(Some(current_hash)) + } + } + Ok(None) + } + + /// Prepares the invalid payload response for the given hash, checking the + /// database for the parent hash and populating the payload status with the latest valid hash + /// according to the engine api spec. + fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult { + // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal + // PoW block, which we need to identify by looking at the parent's block difficulty + if let Some(parent) = self.block_by_hash(parent_hash)? { + if !parent.is_zero_difficulty() { + parent_hash = B256::ZERO; + } + } + + let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?; + Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { + validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), + }) + .with_latest_valid_hash(valid_parent_hash.unwrap_or_default())) + } + + /// Checks if the given `check` hash points to an invalid header, inserting the given `head` + /// block into the invalid header cache if the `check` hash has a known invalid ancestor. + /// + /// Returns a payload status response according to the engine API spec if the block is known to + /// be invalid. 
+ fn check_invalid_ancestor_with_head( + &mut self, + check: B256, + head: B256, + ) -> ProviderResult> { + // check if the check hash was previously marked as invalid + let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; + + // populate the latest valid hash field + let status = self.prepare_invalid_response(header.parent_hash)?; + + // insert the head block into the invalid header cache + self.state.invalid_headers.insert_with_invalid_ancestor(head, header); + + Ok(Some(status)) + } + + /// Checks if the given `head` points to an invalid header, which requires a specific response + /// to a forkchoice update. + fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { + // check if the head was previously marked as invalid + let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; + // populate the latest valid hash field + Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + } + + /// Validate if block is correct and satisfies all the consensus rules that concern the header + /// and block body itself. 
+ fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { + if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { + error!( + ?block, + "Failed to validate total difficulty for block {}: {e}", + block.header.hash() + ); + return Err(e) + } + + if let Err(e) = self.consensus.validate_header(block) { + error!(?block, "Failed to validate header {}: {e}", block.header.hash()); + return Err(e) + } + + if let Err(e) = self.consensus.validate_block_pre_execution(block) { + error!(?block, "Failed to validate block {}: {e}", block.header.hash()); + return Err(e) + } + + Ok(()) + } + + fn buffer_block_without_senders(&mut self, block: SealedBlock) -> Result<(), InsertBlockError> { + match block.try_seal_with_senders() { + Ok(block) => self.buffer_block(block), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + } + } + + fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { + if let Err(err) = self.validate_block(&block) { + return Err(InsertBlockError::consensus_error(err, block.block)) + } + self.state.buffer.insert_block(block); + Ok(()) + } + + fn insert_block_without_senders( + &mut self, + block: SealedBlock, + ) -> Result { + match block.try_seal_with_senders() { + Ok(block) => self.insert_block(block), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + } + } + + fn insert_block( + &mut self, + block: SealedBlockWithSenders, + ) -> Result { + self.insert_block_inner(block.clone()) + .map_err(|kind| InsertBlockError::new(block.block, kind)) + } + + fn insert_block_inner( + &mut self, + block: SealedBlockWithSenders, + ) -> Result { + if self.block_by_hash(block.hash())?.is_some() { + let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment + return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid(attachment))) + } + + // validate block consensus rules + self.validate_block(&block)?; + + let 
state_provider = self.state_provider(block.parent_hash).unwrap(); + let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); + + let block_number = block.number; + let block_hash = block.hash(); + let block = block.unseal(); + let output = executor.execute((&block, U256::MAX).into()).unwrap(); + self.consensus.validate_block_post_execution( + &block, + PostExecutionInput::new(&output.receipts, &output.requests), + )?; + + // TODO: change StateRootProvider API to accept hashed post state + let hashed_state = HashedPostState::from_bundle_state(&output.state.state); + + let (state_root, trie_output) = state_provider.state_root_with_updates(&output.state)?; + if state_root != block.state_root { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.state_root }.into(), + ) + .into()) + } + + let executed = ExecutedBlock { + block: Arc::new(block.block.seal(block_hash)), + senders: Arc::new(block.senders), + execution_output: Arc::new(ExecutionOutcome::new( + output.state, + Receipts::from(output.receipts), + block_number, + vec![Requests::from(output.requests)], + )), + hashed_state: Arc::new(hashed_state), + trie: Arc::new(trie_output), + }; + self.state.tree_state.insert_executed(executed); + + let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment + Ok(InsertPayloadOk::Inserted(BlockStatus::Valid(attachment))) + } + + /// Pre-validate forkchoice update and check whether it can be processed. + /// + /// This method returns the update outcome if validation fails or + /// the node is syncing and the update cannot be processed at the moment. 
+ fn pre_validate_forkchoice_update( + &mut self, + state: ForkchoiceState, + ) -> ProviderResult> { + if state.head_block_hash.is_zero() { + return Ok(Some(OnForkChoiceUpdated::invalid_state())) + } + + // check if the new head hash is connected to any ancestor that we previously marked as + // invalid + let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); + if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { + return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) + } + + if self.is_pipeline_active { + // We can only process new forkchoice updates if the pipeline is idle, since it requires + // exclusive access to the database + trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); + return Ok(Some(OnForkChoiceUpdated::syncing())) + } + + Ok(None) + } +} + +impl EngineApiTreeHandler for EngineApiTreeHandlerImpl +where + P: BlockReader + StateProviderFactory + Clone + 'static, + E: BlockExecutorProvider, + T: EngineTypes + 'static, +{ + type Engine = T; + + fn on_downloaded(&mut self, _blocks: Vec) -> Option { + todo!() + } + + fn on_new_payload( + &mut self, + payload: ExecutionPayload, + cancun_fields: Option, + ) -> ProviderResult> { + // Ensures that the given payload does not violate any consensus rules that concern the + // block's layout, like: + // - missing or invalid base fee + // - invalid extra data + // - invalid transactions + // - incorrect hash + // - the versioned hashes passed with the payload do not exactly match transaction + // versioned hashes + // - the block does not contain blob transactions if it is pre-cancun + // + // This validates the following engine API rule: + // + // 3. Given the expected array of blob versioned hashes client software **MUST** run its + // validation by taking the following steps: + // + // 1. 
Obtain the actual array by concatenating blob versioned hashes lists + // (`tx.blob_versioned_hashes`) of each [blob + // transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included + // in the payload, respecting the order of inclusion. If the payload has no blob + // transactions the expected array **MUST** be `[]`. + // + // 2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage | + // null}` if the expected and the actual arrays don't match. + // + // This validation **MUST** be instantly run in all cases even during active sync process. + let parent_hash = payload.parent_hash(); + let block = match self + .payload_validator + .ensure_well_formed_payload(payload, cancun_fields.into()) + { + Ok(block) => block, + Err(error) => { + error!(target: "engine::tree", %error, "Invalid payload"); + // we need to convert the error to a payload status (response to the CL) + + let latest_valid_hash = + if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { + // Engine-API rules: + // > `latestValidHash: null` if the blockHash validation has failed () + // > `latestValidHash: null` if the expected and the actual arrays don't match () + None + } else { + self.latest_valid_hash_for_invalid_payload(parent_hash)? + }; + + let status = PayloadStatusEnum::from(error); + return Ok(TreeOutcome::new(PayloadStatus::new(status, latest_valid_hash))) + } + }; + + let block_hash = block.hash(); + let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); + if lowest_buffered_ancestor == block_hash { + lowest_buffered_ancestor = block.parent_hash; + } + + // now check the block itself + if let Some(status) = + self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block_hash)? 
+ { + return Ok(TreeOutcome::new(status)) + } + + let status = if self.is_pipeline_active { + self.buffer_block_without_senders(block).unwrap(); + PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } else { + let mut latest_valid_hash = None; + let status = match self.insert_block_without_senders(block).unwrap() { + InsertPayloadOk::Inserted(BlockStatus::Valid(_)) | + InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { + latest_valid_hash = Some(block_hash); + PayloadStatusEnum::Valid + } + InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | + InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { + // TODO: isn't this check redundant? + // check if the block's parent is already marked as invalid + // if let Some(status) = self + // .check_invalid_ancestor_with_head(block.parent_hash, block.hash()) + // .map_err(|error| { + // InsertBlockError::new(block, InsertBlockErrorKind::Provider(error)) + // })? + // { + // return Ok(status) + // } + + // not known to be invalid, but we don't know anything else + PayloadStatusEnum::Syncing + } + }; + PayloadStatus::new(status, latest_valid_hash) + }; + + let mut outcome = TreeOutcome::new(status); + if outcome.outcome.is_valid() { + if let Some(target) = self.state.forkchoice_state_tracker.sync_target_state() { + if target.head_block_hash == block_hash { + outcome = outcome + .with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical(block_hash))); + } + } + } + Ok(outcome) + } + + fn on_forkchoice_updated( + &mut self, + state: ForkchoiceState, + attrs: Option<::PayloadAttributes>, + ) -> ProviderResult> { + if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? 
{ + self.state.forkchoice_state_tracker.set_latest(state, on_updated.forkchoice_status()); + return Ok(TreeOutcome::new(on_updated)) + } + + todo!() + } +} diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml new file mode 100644 index 000000000000..26d504a745af --- /dev/null +++ b/crates/engine/util/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "reth-engine-util" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-fs-util.workspace = true +reth-rpc.workspace = true +reth-rpc-types.workspace = true +reth-engine-primitives.workspace = true +reth-beacon-consensus.workspace = true + +# async +tokio-util.workspace = true +pin-project.workspace = true + +# misc +eyre.workspace = true + +# io +serde.workspace = true +serde_json.workspace = true + +# tracing +tracing.workspace = true + +# async +futures.workspace = true + +[features] +optimism = [ + "reth-rpc/optimism", + "reth-beacon-consensus/optimism", +] diff --git a/crates/node-core/src/engine/engine_store.rs b/crates/engine/util/src/engine_store.rs similarity index 99% rename from crates/node-core/src/engine/engine_store.rs rename to crates/engine/util/src/engine_store.rs index 21e3c370a855..c6e2b65c5be0 100644 --- a/crates/node-core/src/engine/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -129,7 +129,7 @@ pub struct EngineStoreStream { impl EngineStoreStream { /// Create new engine store stream wrapper. 
- pub fn new(stream: S, path: PathBuf) -> Self { + pub const fn new(stream: S, path: PathBuf) -> Self { Self { stream, store: EngineMessageStore::new(path) } } } diff --git a/crates/node-core/src/engine/mod.rs b/crates/engine/util/src/lib.rs similarity index 100% rename from crates/node-core/src/engine/mod.rs rename to crates/engine/util/src/lib.rs diff --git a/crates/node-core/src/engine/skip_fcu.rs b/crates/engine/util/src/skip_fcu.rs similarity index 100% rename from crates/node-core/src/engine/skip_fcu.rs rename to crates/engine/util/src/skip_fcu.rs diff --git a/crates/node-core/src/engine/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs similarity index 100% rename from crates/node-core/src/engine/skip_new_payload.rs rename to crates/engine/util/src/skip_new_payload.rs diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index fb2ad5820b4d..9bdc0c98c91e 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -22,12 +22,16 @@ crc = "3" # misc serde = { workspace = true, features = ["derive"], optional = true } -thiserror.workspace = true +thiserror-no-std = { workspace = true, default-features = false } +once_cell.workspace = true +dyn-clone.workspace = true +rustc-hash.workspace = true # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } +auto_impl.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } @@ -35,7 +39,8 @@ proptest.workspace = true proptest-derive.workspace = true [features] -default = ["serde"] -serde = ["dep:serde"] +default = ["std", "serde"] arbitrary = ["dep:arbitrary", "dep:proptest", "dep:proptest-derive"] optimism = [] +serde = ["dep:serde"] +std = ["thiserror-no-std/std"] diff --git a/crates/ethereum-forks/src/display.rs b/crates/ethereum-forks/src/display.rs new file mode 100644 index 
000000000000..d8a2007e443e --- /dev/null +++ b/crates/ethereum-forks/src/display.rs @@ -0,0 +1,174 @@ +#[cfg(not(feature = "std"))] +use alloc::{ + collections::BTreeMap, + format, + string::{String, ToString}, + vec::Vec, +}; + +use crate::{hardforks::Hardforks, ForkCondition}; + +/// A container to pretty-print a hardfork. +/// +/// The fork is formatted depending on its fork condition: +/// +/// - Block and timestamp based forks are formatted in the same manner (`{name} <({eip})> +/// @{condition}`) +/// - TTD based forks are formatted separately as `{name} <({eip})> @{ttd} (network is known +/// to be merged)` +/// +/// An optional EIP can be attached to the fork to display as well. This should generally be in the +/// form of just `EIP-x`, e.g. `EIP-1559`. +#[derive(Debug)] +struct DisplayFork { + /// The name of the hardfork (e.g. Frontier) + name: String, + /// The fork condition + activated_at: ForkCondition, + /// An optional EIP (e.g. `EIP-1559`). + eip: Option, +} + +impl core::fmt::Display for DisplayFork { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let name_with_eip = if let Some(eip) = &self.eip { + format!("{} ({})", self.name, eip) + } else { + self.name.clone() + }; + + match self.activated_at { + ForkCondition::Block(at) | ForkCondition::Timestamp(at) => { + write!(f, "{name_with_eip:32} @{at}")?; + } + ForkCondition::TTD { fork_block, total_difficulty } => { + write!( + f, + "{:32} @{} ({})", + name_with_eip, + total_difficulty, + if fork_block.is_some() { + "network is known to be merged" + } else { + "network is not known to be merged" + } + )?; + } + ForkCondition::Never => unreachable!(), + } + + Ok(()) + } +} + +// Todo: This will result in dep cycle so currently commented out +// # Examples +// +// ``` +// # use reth_chainspec::MAINNET; +// println!("{}", MAINNET.display_hardforks()); +// ``` +// +/// A container for pretty-printing a list of hardforks. 
+/// +/// An example of the output: +/// +/// ```text +/// Pre-merge hard forks (block based): +// - Frontier @0 +// - Homestead @1150000 +// - Dao @1920000 +// - Tangerine @2463000 +// - SpuriousDragon @2675000 +// - Byzantium @4370000 +// - Constantinople @7280000 +// - Petersburg @7280000 +// - Istanbul @9069000 +// - MuirGlacier @9200000 +// - Berlin @12244000 +// - London @12965000 +// - ArrowGlacier @13773000 +// - GrayGlacier @15050000 +// Merge hard forks: +// - Paris @58750000000000000000000 (network is known to be merged) +// Post-merge hard forks (timestamp based): +// - Shanghai @1681338455 +/// ``` +#[derive(Debug)] +pub struct DisplayHardforks { + /// A list of pre-merge (block based) hardforks + pre_merge: Vec, + /// A list of merge (TTD based) hardforks + with_merge: Vec, + /// A list of post-merge (timestamp based) hardforks + post_merge: Vec, +} + +impl core::fmt::Display for DisplayHardforks { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + fn format( + header: &str, + forks: &[DisplayFork], + next_is_empty: bool, + f: &mut core::fmt::Formatter<'_>, + ) -> core::fmt::Result { + writeln!(f, "{header}:")?; + let mut iter = forks.iter().peekable(); + while let Some(fork) = iter.next() { + write!(f, "- {fork}")?; + if !next_is_empty || iter.peek().is_some() { + writeln!(f)?; + } + } + Ok(()) + } + + format( + "Pre-merge hard forks (block based)", + &self.pre_merge, + self.with_merge.is_empty(), + f, + )?; + + if !self.with_merge.is_empty() { + format("Merge hard forks", &self.with_merge, self.post_merge.is_empty(), f)?; + } + + if !self.post_merge.is_empty() { + format("Post-merge hard forks (timestamp based)", &self.post_merge, true, f)?; + } + + Ok(()) + } +} + +impl DisplayHardforks { + /// Creates a new [`DisplayHardforks`] from an iterator of hardforks. 
+ pub fn new(hardforks: &H, known_paris_block: Option) -> Self { + let mut pre_merge = Vec::new(); + let mut with_merge = Vec::new(); + let mut post_merge = Vec::new(); + + for (fork, condition) in hardforks.forks_iter() { + let mut display_fork = + DisplayFork { name: fork.name().to_string(), activated_at: condition, eip: None }; + + match condition { + ForkCondition::Block(_) => { + pre_merge.push(display_fork); + } + ForkCondition::TTD { total_difficulty, .. } => { + display_fork.activated_at = + ForkCondition::TTD { fork_block: known_paris_block, total_difficulty }; + with_merge.push(display_fork); + } + ForkCondition::Timestamp(_) => { + post_merge.push(display_fork); + } + ForkCondition::Never => continue, + } + } + + Self { pre_merge, with_merge, post_merge } + } +} diff --git a/crates/ethereum-forks/src/forkcondition.rs b/crates/ethereum-forks/src/forkcondition.rs new file mode 100644 index 000000000000..80c7fff647bd --- /dev/null +++ b/crates/ethereum-forks/src/forkcondition.rs @@ -0,0 +1,110 @@ +use crate::Head; +use alloy_primitives::{BlockNumber, U256}; + +/// The condition at which a fork is activated. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub enum ForkCondition { + /// The fork is activated after a certain block. + Block(BlockNumber), + /// The fork is activated after a total difficulty has been reached. + TTD { + /// The block number at which TTD is reached, if it is known. + /// + /// This should **NOT** be set unless you want this block advertised as [EIP-2124][eip2124] + /// `FORK_NEXT`. This is currently only the case for Sepolia and Holesky. + /// + /// [eip2124]: https://eips.ethereum.org/EIPS/eip-2124 + fork_block: Option, + /// The total difficulty after which the fork is activated. + total_difficulty: U256, + }, + /// The fork is activated after a specific timestamp. 
+ Timestamp(u64), + /// The fork is never activated + #[default] + Never, +} + +impl ForkCondition { + /// Returns true if the fork condition is timestamp based. + pub const fn is_timestamp(&self) -> bool { + matches!(self, Self::Timestamp(_)) + } + + /// Checks whether the fork condition is satisfied at the given block. + /// + /// For TTD conditions, this will only return true if the activation block is already known. + /// + /// For timestamp conditions, this will always return false. + pub const fn active_at_block(&self, current_block: BlockNumber) -> bool { + matches!(self, Self::Block(block) + | Self::TTD { fork_block: Some(block), .. } if current_block >= *block) + } + + /// Checks if the given block is the first block that satisfies the fork condition. + /// + /// This will return false for any condition that is not block based. + pub const fn transitions_at_block(&self, current_block: BlockNumber) -> bool { + matches!(self, Self::Block(block) if current_block == *block) + } + + /// Checks whether the fork condition is satisfied at the given total difficulty and difficulty + /// of a current block. + /// + /// The fork is considered active if the _previous_ total difficulty is above the threshold. + /// To achieve that, we subtract the passed `difficulty` from the current block's total + /// difficulty, and check if it's above the Fork Condition's total difficulty (here: + /// `58_750_000_000_000_000_000_000`) + /// + /// This will return false for any condition that is not TTD-based. + pub fn active_at_ttd(&self, ttd: U256, difficulty: U256) -> bool { + matches!(self, Self::TTD { total_difficulty, .. } + if ttd.saturating_sub(difficulty) >= *total_difficulty) + } + + /// Checks whether the fork condition is satisfied at the given timestamp. + /// + /// This will return false for any condition that is not timestamp-based. 
+ pub const fn active_at_timestamp(&self, timestamp: u64) -> bool { + matches!(self, Self::Timestamp(time) if timestamp >= *time) + } + + /// Checks if the given block is the first block that satisfies the fork condition. + /// + /// This will return false for any condition that is not timestamp based. + pub const fn transitions_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { + matches!(self, Self::Timestamp(time) if timestamp >= *time && parent_timestamp < *time) + } + + /// Checks whether the fork condition is satisfied at the given head block. + /// + /// This will return true if: + /// + /// - The condition is satisfied by the block number; + /// - The condition is satisfied by the timestamp; + /// - or the condition is satisfied by the total difficulty + pub fn active_at_head(&self, head: &Head) -> bool { + self.active_at_block(head.number) || + self.active_at_timestamp(head.timestamp) || + self.active_at_ttd(head.total_difficulty, head.difficulty) + } + + /// Get the total terminal difficulty for this fork condition. + /// + /// Returns `None` for fork conditions that are not TTD based. + pub const fn ttd(&self) -> Option { + match self { + Self::TTD { total_difficulty, .. } => Some(*total_difficulty), + _ => None, + } + } + + /// Returns the timestamp of the fork condition, if it is timestamp based. + pub const fn as_timestamp(&self) -> Option { + match self { + Self::Timestamp(timestamp) => Some(*timestamp), + _ => None, + } + } +} diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index baf6ac021525..0a4d752b9b0d 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -3,22 +3,27 @@ //! Previously version of Apache licenced [`ethereum-forkid`](https://crates.io/crates/ethereum-forkid). 
use crate::Head; +#[cfg(not(feature = "std"))] +use alloc::{ + collections::{BTreeMap, BTreeSet}, + vec::Vec, +}; use alloy_primitives::{hex, BlockNumber, B256}; use alloy_rlp::{Error as RlpError, *}; #[cfg(any(test, feature = "arbitrary"))] use arbitrary::Arbitrary; +use core::{ + cmp::Ordering, + fmt, + ops::{Add, AddAssign}, +}; use crc::*; #[cfg(any(test, feature = "arbitrary"))] use proptest_derive::Arbitrary as PropTestArbitrary; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::{ - cmp::Ordering, - collections::{BTreeMap, BTreeSet}, - fmt, - ops::{Add, AddAssign}, -}; -use thiserror::Error; +#[cfg(feature = "std")] +use std::collections::{BTreeMap, BTreeSet}; const CRC_32_IEEE: Crc = Crc::::new(&CRC_32_ISO_HDLC); const TIMESTAMP_BEFORE_ETHEREUM_MAINNET: u64 = 1_300_000_000; @@ -174,7 +179,7 @@ impl From for ForkId { } /// Reason for rejecting provided `ForkId`. -#[derive(Clone, Copy, Debug, Error, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, Debug, thiserror_no_std::Error, PartialEq, Eq, Hash)] pub enum ValidationError { /// Remote node is outdated and needs a software update. #[error( diff --git a/crates/ethereum-forks/src/hardfork.rs b/crates/ethereum-forks/src/hardfork.rs deleted file mode 100644 index 9ae1952fe62a..000000000000 --- a/crates/ethereum-forks/src/hardfork.rs +++ /dev/null @@ -1,695 +0,0 @@ -use alloy_chains::Chain; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; -use std::{fmt::Display, str::FromStr}; - -/// Represents the consensus type of a blockchain fork. -/// -/// This enum defines two variants: `ProofOfWork` for hardforks that use a proof-of-work consensus -/// mechanism, and `ProofOfStake` for hardforks that use a proof-of-stake consensus mechanism. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum ConsensusType { - /// Indicates a proof-of-work consensus mechanism. - ProofOfWork, - /// Indicates a proof-of-stake consensus mechanism. 
- ProofOfStake, -} - -/// The name of an Ethereum hardfork. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)] -#[non_exhaustive] -pub enum Hardfork { - /// Frontier: . - Frontier, - /// Homestead: . - Homestead, - /// The DAO fork: . - Dao, - /// Tangerine: . - Tangerine, - /// Spurious Dragon: . - SpuriousDragon, - /// Byzantium: . - Byzantium, - /// Constantinople: . - Constantinople, - /// Petersburg: . - Petersburg, - /// Istanbul: . - Istanbul, - /// Muir Glacier: . - MuirGlacier, - /// Berlin: . - Berlin, - /// London: . - London, - /// Arrow Glacier: . - ArrowGlacier, - /// Gray Glacier: . - GrayGlacier, - /// Paris: . - Paris, - /// Bedrock: . - #[cfg(feature = "optimism")] - Bedrock, - /// Regolith: . - #[cfg(feature = "optimism")] - Regolith, - /// Shanghai: . - Shanghai, - /// Canyon: - /// . - #[cfg(feature = "optimism")] - Canyon, - // ArbOS11, - /// Cancun. - Cancun, - /// Ecotone: . - #[cfg(feature = "optimism")] - Ecotone, - // ArbOS20Atlas, - - // Upcoming - /// Prague: - Prague, - /// Fjord: - #[cfg(feature = "optimism")] - Fjord, -} - -impl Hardfork { - /// Retrieves the consensus type for the specified hardfork. - pub fn consensus_type(&self) -> ConsensusType { - if *self >= Self::Paris { - ConsensusType::ProofOfStake - } else { - ConsensusType::ProofOfWork - } - } - - /// Checks if the hardfork uses Proof of Stake consensus. - pub fn is_proof_of_stake(&self) -> bool { - matches!(self.consensus_type(), ConsensusType::ProofOfStake) - } - - /// Checks if the hardfork uses Proof of Work consensus. - pub fn is_proof_of_work(&self) -> bool { - matches!(self.consensus_type(), ConsensusType::ProofOfWork) - } - - /// Retrieves the activation block for the specified hardfork on the given chain. 
- pub fn activation_block(&self, chain: Chain) -> Option { - if chain == Chain::mainnet() { - return self.mainnet_activation_block(); - } - if chain == Chain::sepolia() { - return self.sepolia_activation_block(); - } - if chain == Chain::holesky() { - return self.holesky_activation_block(); - } - - #[cfg(feature = "optimism")] - { - if chain == Chain::base_sepolia() { - return self.base_sepolia_activation_block(); - } - if chain == Chain::base_mainnet() { - return self.base_mainnet_activation_block(); - } - } - - None - } - - /// Retrieves the activation block for the specified hardfork on the Ethereum mainnet. - pub const fn mainnet_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier => Some(0), - Self::Homestead => Some(1150000), - Self::Dao => Some(1920000), - Self::Tangerine => Some(2463000), - Self::SpuriousDragon => Some(2675000), - Self::Byzantium => Some(4370000), - Self::Constantinople | Self::Petersburg => Some(7280000), - Self::Istanbul => Some(9069000), - Self::MuirGlacier => Some(9200000), - Self::Berlin => Some(12244000), - Self::London => Some(12965000), - Self::ArrowGlacier => Some(13773000), - Self::GrayGlacier => Some(15050000), - Self::Paris => Some(15537394), - Self::Shanghai => Some(17034870), - Self::Cancun => Some(19426587), - - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Sepolia testnet. 
- pub const fn sepolia_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Paris => Some(1735371), - Self::Shanghai => Some(2990908), - Self::Cancun => Some(5187023), - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier => Some(0), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Arbitrum Sepolia testnet. - pub const fn arbitrum_sepolia_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(0), - Self::Shanghai => Some(10653737), - // Hardfork::ArbOS11 => Some(10653737), - Self::Cancun => Some(18683405), - // Hardfork::ArbOS20Atlas => Some(18683405), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Arbitrum One mainnet. 
- pub const fn arbitrum_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(0), - Self::Shanghai => Some(184097479), - // Hardfork::ArbOS11 => Some(184097479), - Self::Cancun => Some(190301729), - // Hardfork::ArbOS20Atlas => Some(190301729), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Base Sepolia testnet. - #[cfg(feature = "optimism")] - pub const fn base_sepolia_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris | - Self::Bedrock | - Self::Regolith => Some(0), - Self::Shanghai | Self::Canyon => Some(2106456), - Self::Cancun | Self::Ecotone => Some(6383256), - Self::Fjord => Some(10615056), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Base mainnet. 
- #[cfg(feature = "optimism")] - pub const fn base_mainnet_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris | - Self::Bedrock | - Self::Regolith => Some(0), - Self::Shanghai | Self::Canyon => Some(9101527), - Self::Cancun | Self::Ecotone => Some(11188936), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the holesky testnet. - const fn holesky_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(0), - Self::Shanghai => Some(6698), - Self::Cancun => Some(894733), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the given chain. - pub fn activation_timestamp(&self, chain: Chain) -> Option { - if chain == Chain::mainnet() { - return self.mainnet_activation_timestamp(); - } - if chain == Chain::sepolia() { - return self.sepolia_activation_timestamp(); - } - if chain == Chain::holesky() { - return self.holesky_activation_timestamp(); - } - #[cfg(feature = "optimism")] - { - if chain == Chain::base_sepolia() { - return self.base_sepolia_activation_timestamp(); - } - if chain == Chain::base_mainnet() { - return self.base_mainnet_activation_timestamp(); - } - } - - None - } - - /// Retrieves the activation timestamp for the specified hardfork on the Ethereum mainnet. 
- pub const fn mainnet_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier => Some(1438226773), - Self::Homestead => Some(1457938193), - Self::Dao => Some(1468977640), - Self::Tangerine => Some(1476753571), - Self::SpuriousDragon => Some(1479788144), - Self::Byzantium => Some(1508131331), - Self::Constantinople | Self::Petersburg => Some(1551340324), - Self::Istanbul => Some(1575807909), - Self::MuirGlacier => Some(1577953849), - Self::Berlin => Some(1618481223), - Self::London => Some(1628166822), - Self::ArrowGlacier => Some(1639036523), - Self::GrayGlacier => Some(1656586444), - Self::Paris => Some(1663224162), - Self::Shanghai => Some(1681338455), - Self::Cancun => Some(1710338135), - - // upcoming hardforks - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Sepolia testnet. - pub const fn sepolia_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(1633267481), - Self::Shanghai => Some(1677557088), - Self::Cancun => Some(1706655072), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Holesky testnet. 
- pub const fn holesky_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Shanghai => Some(1696000704), - Self::Cancun => Some(1707305664), - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(1695902100), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Arbitrum Sepolia - /// testnet. - pub const fn arbitrum_sepolia_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(1692726996), - Self::Shanghai => Some(1706634000), - // Hardfork::ArbOS11 => Some(1706634000), - Self::Cancun => Some(1709229600), - // Hardfork::ArbOS20Atlas => Some(1709229600), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Arbitrum One mainnet. 
- pub const fn arbitrum_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(1622240000), - Self::Shanghai => Some(1708804873), - // Hardfork::ArbOS11 => Some(1708804873), - Self::Cancun => Some(1710424089), - // Hardfork::ArbOS20Atlas => Some(1710424089), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Base Sepolia testnet. - #[cfg(feature = "optimism")] - pub const fn base_sepolia_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris | - Self::Bedrock | - Self::Regolith => Some(1695768288), - Self::Shanghai | Self::Canyon => Some(1699981200), - Self::Cancun | Self::Ecotone => Some(1708534800), - Self::Fjord => Some(1716998400), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Base mainnet. 
- #[cfg(feature = "optimism")] - pub const fn base_mainnet_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris | - Self::Bedrock | - Self::Regolith => Some(1686789347), - Self::Shanghai | Self::Canyon => Some(1704992401), - Self::Cancun | Self::Ecotone => Some(1710374401), - Self::Fjord => Some(1720627201), - _ => None, - } - } -} - -impl FromStr for Hardfork { - type Err = String; - - fn from_str(s: &str) -> Result { - Ok(match s.to_lowercase().as_str() { - "frontier" => Self::Frontier, - "homestead" => Self::Homestead, - "dao" => Self::Dao, - "tangerine" => Self::Tangerine, - "spuriousdragon" => Self::SpuriousDragon, - "byzantium" => Self::Byzantium, - "constantinople" => Self::Constantinople, - "petersburg" => Self::Petersburg, - "istanbul" => Self::Istanbul, - "muirglacier" => Self::MuirGlacier, - "berlin" => Self::Berlin, - "london" => Self::London, - "arrowglacier" => Self::ArrowGlacier, - "grayglacier" => Self::GrayGlacier, - "paris" => Self::Paris, - "shanghai" => Self::Shanghai, - "cancun" => Self::Cancun, - #[cfg(feature = "optimism")] - "bedrock" => Self::Bedrock, - #[cfg(feature = "optimism")] - "regolith" => Self::Regolith, - #[cfg(feature = "optimism")] - "canyon" => Self::Canyon, - #[cfg(feature = "optimism")] - "ecotone" => Self::Ecotone, - #[cfg(feature = "optimism")] - "fjord" => Self::Fjord, - "prague" => Self::Prague, - // "arbos11" => Hardfork::ArbOS11, - // "arbos20atlas" => Hardfork::ArbOS20Atlas, - _ => return Err(format!("Unknown hardfork: {s}")), - }) - } -} - -impl Display for Hardfork { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self:?}") - } -} - -#[cfg(test)] -mod 
tests { - use super::*; - - #[test] - fn check_hardfork_from_str() { - let hardfork_str = [ - "frOntier", - "homEstead", - "dao", - "tAngerIne", - "spurIousdrAgon", - "byzAntium", - "constantinople", - "petersburg", - "istanbul", - "muirglacier", - "bErlin", - "lonDon", - "arrowglacier", - "grayglacier", - "PARIS", - "ShAnGhAI", - "CaNcUn", - "PrAguE", - ]; - let expected_hardforks = [ - Hardfork::Frontier, - Hardfork::Homestead, - Hardfork::Dao, - Hardfork::Tangerine, - Hardfork::SpuriousDragon, - Hardfork::Byzantium, - Hardfork::Constantinople, - Hardfork::Petersburg, - Hardfork::Istanbul, - Hardfork::MuirGlacier, - Hardfork::Berlin, - Hardfork::London, - Hardfork::ArrowGlacier, - Hardfork::GrayGlacier, - Hardfork::Paris, - Hardfork::Shanghai, - Hardfork::Cancun, - Hardfork::Prague, - ]; - - let hardforks: Vec = - hardfork_str.iter().map(|h| Hardfork::from_str(h).unwrap()).collect(); - - assert_eq!(hardforks, expected_hardforks); - } - - #[test] - #[cfg(feature = "optimism")] - fn check_op_hardfork_from_str() { - let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD"]; - let expected_hardforks = [ - Hardfork::Bedrock, - Hardfork::Regolith, - Hardfork::Canyon, - Hardfork::Ecotone, - Hardfork::Fjord, - ]; - - let hardforks: Vec = - hardfork_str.iter().map(|h| Hardfork::from_str(h).unwrap()).collect(); - - assert_eq!(hardforks, expected_hardforks); - } - - #[test] - fn check_nonexistent_hardfork_from_str() { - assert!(Hardfork::from_str("not a hardfork").is_err()); - } - - #[test] - fn check_consensus_type() { - let pow_hardforks = [ - Hardfork::Frontier, - Hardfork::Homestead, - Hardfork::Dao, - Hardfork::Tangerine, - Hardfork::SpuriousDragon, - Hardfork::Byzantium, - Hardfork::Constantinople, - Hardfork::Petersburg, - Hardfork::Istanbul, - Hardfork::MuirGlacier, - Hardfork::Berlin, - Hardfork::London, - Hardfork::ArrowGlacier, - Hardfork::GrayGlacier, - ]; - - let pos_hardforks = [Hardfork::Paris, Hardfork::Shanghai, Hardfork::Cancun]; - - 
#[cfg(feature = "optimism")] - let op_hardforks = [ - Hardfork::Bedrock, - Hardfork::Regolith, - Hardfork::Canyon, - Hardfork::Ecotone, - Hardfork::Fjord, - ]; - - for hardfork in &pow_hardforks { - assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfWork); - assert!(!hardfork.is_proof_of_stake()); - assert!(hardfork.is_proof_of_work()); - } - - for hardfork in &pos_hardforks { - assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfStake); - assert!(hardfork.is_proof_of_stake()); - assert!(!hardfork.is_proof_of_work()); - } - - #[cfg(feature = "optimism")] - for hardfork in &op_hardforks { - assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfStake); - assert!(hardfork.is_proof_of_stake()); - assert!(!hardfork.is_proof_of_work()); - } - } -} diff --git a/crates/ethereum-forks/src/hardfork/dev.rs b/crates/ethereum-forks/src/hardfork/dev.rs new file mode 100644 index 000000000000..4b422141b425 --- /dev/null +++ b/crates/ethereum-forks/src/hardfork/dev.rs @@ -0,0 +1,32 @@ +use crate::{ChainHardforks, EthereumHardfork, ForkCondition}; +use alloy_primitives::U256; +use once_cell::sync::Lazy; + +/// Dev hardforks +pub static DEV_HARDFORKS: Lazy = Lazy::new(|| { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Dao.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + ( + EthereumHardfork::Paris.boxed(), + 
ForkCondition::TTD { fork_block: None, total_difficulty: U256::ZERO }, + ), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(0)), + #[cfg(feature = "optimism")] + (crate::OptimismHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)), + #[cfg(feature = "optimism")] + (crate::OptimismHardfork::Bedrock.boxed(), ForkCondition::Block(0)), + #[cfg(feature = "optimism")] + (crate::OptimismHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)), + ]) +}); diff --git a/crates/ethereum-forks/src/hardfork/ethereum.rs b/crates/ethereum-forks/src/hardfork/ethereum.rs new file mode 100644 index 000000000000..7a2618c3c70f --- /dev/null +++ b/crates/ethereum-forks/src/hardfork/ethereum.rs @@ -0,0 +1,418 @@ +use crate::{hardfork, ChainHardforks, ForkCondition, Hardfork}; +use alloy_chains::Chain; +use alloy_primitives::{uint, U256}; +use core::{ + fmt, + fmt::{Display, Formatter}, + str::FromStr, +}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +hardfork!( + /// The name of an Ethereum hardfork. + EthereumHardfork { + /// Frontier: . + Frontier, + /// Homestead: . + Homestead, + /// The DAO fork: . + Dao, + /// Tangerine: . + Tangerine, + /// Spurious Dragon: . + SpuriousDragon, + /// Byzantium: . + Byzantium, + /// Constantinople: . + Constantinople, + /// Petersburg: . + Petersburg, + /// Istanbul: . + Istanbul, + /// Muir Glacier: . + MuirGlacier, + /// Berlin: . + Berlin, + /// London: . + London, + /// Arrow Glacier: . + ArrowGlacier, + /// Gray Glacier: . + GrayGlacier, + /// Paris: . + Paris, + /// Shanghai: . + Shanghai, + /// Cancun. + Cancun, + /// Prague: + Prague, + } +); + +impl EthereumHardfork { + /// Retrieves the activation block for the specified hardfork on the given chain. 
+ pub fn activation_block(&self, chain: Chain) -> Option { + if chain == Chain::mainnet() { + return self.mainnet_activation_block() + } + if chain == Chain::sepolia() { + return self.sepolia_activation_block() + } + if chain == Chain::holesky() { + return self.holesky_activation_block() + } + + None + } + + /// Retrieves the activation block for the specified hardfork on the Ethereum mainnet. + pub const fn mainnet_activation_block(&self) -> Option { + match self { + Self::Frontier => Some(0), + Self::Homestead => Some(1150000), + Self::Dao => Some(1920000), + Self::Tangerine => Some(2463000), + Self::SpuriousDragon => Some(2675000), + Self::Byzantium => Some(4370000), + Self::Constantinople | Self::Petersburg => Some(7280000), + Self::Istanbul => Some(9069000), + Self::MuirGlacier => Some(9200000), + Self::Berlin => Some(12244000), + Self::London => Some(12965000), + Self::ArrowGlacier => Some(13773000), + Self::GrayGlacier => Some(15050000), + Self::Paris => Some(15537394), + Self::Shanghai => Some(17034870), + Self::Cancun => Some(19426587), + _ => None, + } + } + + /// Retrieves the activation block for the specified hardfork on the Sepolia testnet. + pub const fn sepolia_activation_block(&self) -> Option { + match self { + Self::Paris => Some(1735371), + Self::Shanghai => Some(2990908), + Self::Cancun => Some(5187023), + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier => Some(0), + _ => None, + } + } + + /// Retrieves the activation block for the specified hardfork on the holesky testnet. 
+ const fn holesky_activation_block(&self) -> Option { + match self { + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(0), + Self::Shanghai => Some(6698), + Self::Cancun => Some(894733), + _ => None, + } + } + + /// Retrieves the activation block for the specified hardfork on the Arbitrum Sepolia testnet. + pub const fn arbitrum_sepolia_activation_block(&self) -> Option { + match self { + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(0), + Self::Shanghai => Some(10653737), + // Hardfork::ArbOS11 => Some(10653737), + Self::Cancun => Some(18683405), + // Hardfork::ArbOS20Atlas => Some(18683405), + _ => None, + } + } + + /// Retrieves the activation block for the specified hardfork on the Arbitrum One mainnet. + pub const fn arbitrum_activation_block(&self) -> Option { + match self { + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(0), + Self::Shanghai => Some(184097479), + // Hardfork::ArbOS11 => Some(184097479), + Self::Cancun => Some(190301729), + // Hardfork::ArbOS20Atlas => Some(190301729), + _ => None, + } + } + + /// Retrieves the activation timestamp for the specified hardfork on the given chain. 
+ pub fn activation_timestamp(&self, chain: Chain) -> Option { + if chain == Chain::mainnet() { + return self.mainnet_activation_timestamp() + } + if chain == Chain::sepolia() { + return self.sepolia_activation_timestamp() + } + if chain == Chain::holesky() { + return self.holesky_activation_timestamp() + } + + None + } + + /// Retrieves the activation timestamp for the specified hardfork on the Ethereum mainnet. + pub const fn mainnet_activation_timestamp(&self) -> Option { + match self { + Self::Frontier => Some(1438226773), + Self::Homestead => Some(1457938193), + Self::Dao => Some(1468977640), + Self::Tangerine => Some(1476753571), + Self::SpuriousDragon => Some(1479788144), + Self::Byzantium => Some(1508131331), + Self::Constantinople | Self::Petersburg => Some(1551340324), + Self::Istanbul => Some(1575807909), + Self::MuirGlacier => Some(1577953849), + Self::Berlin => Some(1618481223), + Self::London => Some(1628166822), + Self::ArrowGlacier => Some(1639036523), + Self::GrayGlacier => Some(1656586444), + Self::Paris => Some(1663224162), + Self::Shanghai => Some(1681338455), + Self::Cancun => Some(1710338135), + + // upcoming hardforks + _ => None, + } + } + + /// Retrieves the activation timestamp for the specified hardfork on the Sepolia testnet. + pub const fn sepolia_activation_timestamp(&self) -> Option { + match self { + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(1633267481), + Self::Shanghai => Some(1677557088), + Self::Cancun => Some(1706655072), + _ => None, + } + } + + /// Retrieves the activation timestamp for the specified hardfork on the Holesky testnet. 
+ pub const fn holesky_activation_timestamp(&self) -> Option { + match self { + Self::Shanghai => Some(1696000704), + Self::Cancun => Some(1707305664), + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(1695902100), + _ => None, + } + } + + /// Retrieves the activation timestamp for the specified hardfork on the Arbitrum Sepolia + /// testnet. + pub const fn arbitrum_sepolia_activation_timestamp(&self) -> Option { + match self { + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(1692726996), + Self::Shanghai => Some(1706634000), + // Hardfork::ArbOS11 => Some(1706634000), + Self::Cancun => Some(1709229600), + // Hardfork::ArbOS20Atlas => Some(1709229600), + _ => None, + } + } + + /// Retrieves the activation timestamp for the specified hardfork on the Arbitrum One mainnet. + pub const fn arbitrum_activation_timestamp(&self) -> Option { + match self { + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(1622240000), + Self::Shanghai => Some(1708804873), + // Hardfork::ArbOS11 => Some(1708804873), + Self::Cancun => Some(1710424089), + // Hardfork::ArbOS20Atlas => Some(1710424089), + _ => None, + } + } + + /// Ethereum mainnet list of hardforks. 
+ pub const fn mainnet() -> [(Self, ForkCondition); 17] { + [ + (Self::Frontier, ForkCondition::Block(0)), + (Self::Homestead, ForkCondition::Block(1150000)), + (Self::Dao, ForkCondition::Block(1920000)), + (Self::Tangerine, ForkCondition::Block(2463000)), + (Self::SpuriousDragon, ForkCondition::Block(2675000)), + (Self::Byzantium, ForkCondition::Block(4370000)), + (Self::Constantinople, ForkCondition::Block(7280000)), + (Self::Petersburg, ForkCondition::Block(7280000)), + (Self::Istanbul, ForkCondition::Block(9069000)), + (Self::MuirGlacier, ForkCondition::Block(9200000)), + (Self::Berlin, ForkCondition::Block(12244000)), + (Self::London, ForkCondition::Block(12965000)), + (Self::ArrowGlacier, ForkCondition::Block(13773000)), + (Self::GrayGlacier, ForkCondition::Block(15050000)), + ( + Self::Paris, + ForkCondition::TTD { + fork_block: None, + total_difficulty: uint!(58_750_000_000_000_000_000_000_U256), + }, + ), + (Self::Shanghai, ForkCondition::Timestamp(1681338455)), + (Self::Cancun, ForkCondition::Timestamp(1710338135)), + ] + } + + /// Ethereum sepolia list of hardforks. 
+ pub const fn sepolia() -> [(Self, ForkCondition); 15] { + [ + (Self::Frontier, ForkCondition::Block(0)), + (Self::Homestead, ForkCondition::Block(0)), + (Self::Dao, ForkCondition::Block(0)), + (Self::Tangerine, ForkCondition::Block(0)), + (Self::SpuriousDragon, ForkCondition::Block(0)), + (Self::Byzantium, ForkCondition::Block(0)), + (Self::Constantinople, ForkCondition::Block(0)), + (Self::Petersburg, ForkCondition::Block(0)), + (Self::Istanbul, ForkCondition::Block(0)), + (Self::MuirGlacier, ForkCondition::Block(0)), + (Self::Berlin, ForkCondition::Block(0)), + (Self::London, ForkCondition::Block(0)), + ( + Self::Paris, + ForkCondition::TTD { + fork_block: Some(1735371), + total_difficulty: uint!(17_000_000_000_000_000_U256), + }, + ), + (Self::Shanghai, ForkCondition::Timestamp(1677557088)), + (Self::Cancun, ForkCondition::Timestamp(1706655072)), + ] + } + + /// Ethereum holesky list of hardforks. + pub const fn holesky() -> [(Self, ForkCondition); 15] { + [ + (Self::Frontier, ForkCondition::Block(0)), + (Self::Homestead, ForkCondition::Block(0)), + (Self::Dao, ForkCondition::Block(0)), + (Self::Tangerine, ForkCondition::Block(0)), + (Self::SpuriousDragon, ForkCondition::Block(0)), + (Self::Byzantium, ForkCondition::Block(0)), + (Self::Constantinople, ForkCondition::Block(0)), + (Self::Petersburg, ForkCondition::Block(0)), + (Self::Istanbul, ForkCondition::Block(0)), + (Self::MuirGlacier, ForkCondition::Block(0)), + (Self::Berlin, ForkCondition::Block(0)), + (Self::London, ForkCondition::Block(0)), + (Self::Paris, ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }), + (Self::Shanghai, ForkCondition::Timestamp(1696000704)), + (Self::Cancun, ForkCondition::Timestamp(1707305664)), + ] + } +} + +impl From<[(EthereumHardfork, ForkCondition); N]> for ChainHardforks { + fn from(list: [(EthereumHardfork, ForkCondition); N]) -> Self { + Self::new( + list.into_iter() + .map(|(fork, cond)| (Box::new(fork) as Box, cond)) + .collect(), + ) + } +} diff 
--git a/crates/ethereum-forks/src/hardfork/macros.rs b/crates/ethereum-forks/src/hardfork/macros.rs new file mode 100644 index 000000000000..780c15f6e6b9 --- /dev/null +++ b/crates/ethereum-forks/src/hardfork/macros.rs @@ -0,0 +1,52 @@ +/// Macro that defines different variants of a chain specific enum. See [`crate::Hardfork`] as an +/// example. +#[macro_export] +macro_rules! hardfork { + ($(#[$enum_meta:meta])* $enum:ident { $( $(#[$meta:meta])* $variant:ident ),* $(,)? }) => { + $(#[$enum_meta])* + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + #[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)] + pub enum $enum { + $( $(#[$meta])* $variant ),* + } + + impl $enum { + /// Returns variant as `str`. + pub const fn name(&self) -> &'static str { + match self { + $( $enum::$variant => stringify!($variant), )* + } + } + + /// Boxes `self` and returns it as `Box`. + pub fn boxed(self) -> Box { + Box::new(self) + } + } + + impl FromStr for $enum { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + $( + s if s == stringify!($variant).to_lowercase() => Ok($enum::$variant), + )* + _ => return Err(format!("Unknown hardfork: {s}")), + } + } + } + + impl Hardfork for $enum { + fn name(&self) -> &'static str { + self.name() + } + } + + impl Display for $enum { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{self:?}") + } + } + } +} diff --git a/crates/ethereum-forks/src/hardfork/mod.rs b/crates/ethereum-forks/src/hardfork/mod.rs new file mode 100644 index 000000000000..b6faef6ec2f0 --- /dev/null +++ b/crates/ethereum-forks/src/hardfork/mod.rs @@ -0,0 +1,126 @@ +mod macros; + +mod ethereum; +pub use ethereum::EthereumHardfork; + +mod optimism; +pub use optimism::OptimismHardfork; + +mod dev; +pub use dev::DEV_HARDFORKS; + +use core::{ + any::Any, + hash::{Hash, Hasher}, +}; +use dyn_clone::DynClone; + +#[cfg(not(feature = "std"))] +use alloc::{format, string::String}; + +/// 
Generic hardfork trait. +#[auto_impl::auto_impl(&, Box)] +pub trait Hardfork: Any + DynClone + Send + Sync + 'static { + /// Fork name. + fn name(&self) -> &'static str; +} + +dyn_clone::clone_trait_object!(Hardfork); + +impl core::fmt::Debug for dyn Hardfork + 'static { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct(self.name()).finish() + } +} + +impl PartialEq for dyn Hardfork + 'static { + fn eq(&self, other: &Self) -> bool { + self.name() == other.name() + } +} + +impl Eq for dyn Hardfork + 'static {} + +impl Hash for dyn Hardfork + 'static { + fn hash(&self, state: &mut H) { + self.name().hash(state) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::hardfork::optimism::OptimismHardfork; + use std::str::FromStr; + + #[test] + fn check_hardfork_from_str() { + let hardfork_str = [ + "frOntier", + "homEstead", + "dao", + "tAngerIne", + "spurIousdrAgon", + "byzAntium", + "constantinople", + "petersburg", + "istanbul", + "muirglacier", + "bErlin", + "lonDon", + "arrowglacier", + "grayglacier", + "PARIS", + "ShAnGhAI", + "CaNcUn", + "PrAguE", + ]; + let expected_hardforks = [ + EthereumHardfork::Frontier, + EthereumHardfork::Homestead, + EthereumHardfork::Dao, + EthereumHardfork::Tangerine, + EthereumHardfork::SpuriousDragon, + EthereumHardfork::Byzantium, + EthereumHardfork::Constantinople, + EthereumHardfork::Petersburg, + EthereumHardfork::Istanbul, + EthereumHardfork::MuirGlacier, + EthereumHardfork::Berlin, + EthereumHardfork::London, + EthereumHardfork::ArrowGlacier, + EthereumHardfork::GrayGlacier, + EthereumHardfork::Paris, + EthereumHardfork::Shanghai, + EthereumHardfork::Cancun, + EthereumHardfork::Prague, + ]; + + let hardforks: Vec = + hardfork_str.iter().map(|h| EthereumHardfork::from_str(h).unwrap()).collect(); + + assert_eq!(hardforks, expected_hardforks); + } + + #[test] + fn check_op_hardfork_from_str() { + let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD"]; + let 
expected_hardforks = [ + OptimismHardfork::Bedrock, + OptimismHardfork::Regolith, + OptimismHardfork::Canyon, + OptimismHardfork::Ecotone, + OptimismHardfork::Fjord, + ]; + + let hardforks: Vec = + hardfork_str.iter().map(|h| OptimismHardfork::from_str(h).unwrap()).collect(); + + assert_eq!(hardforks, expected_hardforks); + } + + #[test] + fn check_nonexistent_hardfork_from_str() { + assert!(EthereumHardfork::from_str("not a hardfork").is_err()); + } +} diff --git a/crates/ethereum-forks/src/hardfork/optimism.rs b/crates/ethereum-forks/src/hardfork/optimism.rs new file mode 100644 index 000000000000..6933b7feddae --- /dev/null +++ b/crates/ethereum-forks/src/hardfork/optimism.rs @@ -0,0 +1,337 @@ +use crate::{hardfork, ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; +use alloy_chains::Chain; +use alloy_primitives::U256; +use core::{ + any::Any, + fmt::{self, Display, Formatter}, + str::FromStr, +}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +hardfork!( + /// The name of an optimism hardfork. + /// + /// When building a list of hardforks for a chain, it's still expected to mix with [`EthereumHardfork`]. + OptimismHardfork { + /// Bedrock: . + Bedrock, + /// Regolith: . + Regolith, + /// . + Canyon, + /// Ecotone: . + Ecotone, + /// Fjord: + Fjord, + } +); + +impl OptimismHardfork { + /// Retrieves the activation block for the specified hardfork on the given chain. + pub fn activation_block(self, fork: H, chain: Chain) -> Option { + if chain == Chain::base_sepolia() { + return Self::base_sepolia_activation_block(fork) + } + if chain == Chain::base_mainnet() { + return Self::base_mainnet_activation_block(fork) + } + + None + } + + /// Retrieves the activation timestamp for the specified hardfork on the given chain. 
+ pub fn activation_timestamp(self, fork: H, chain: Chain) -> Option { + if chain == Chain::base_sepolia() { + return Self::base_sepolia_activation_timestamp(fork) + } + if chain == Chain::base_mainnet() { + return Self::base_mainnet_activation_timestamp(fork) + } + + None + } + + /// Retrieves the activation block for the specified hardfork on the Base Sepolia testnet. + pub fn base_sepolia_activation_block(fork: H) -> Option { + match_hardfork( + fork, + |fork| match fork { + EthereumHardfork::Frontier | + EthereumHardfork::Homestead | + EthereumHardfork::Dao | + EthereumHardfork::Tangerine | + EthereumHardfork::SpuriousDragon | + EthereumHardfork::Byzantium | + EthereumHardfork::Constantinople | + EthereumHardfork::Petersburg | + EthereumHardfork::Istanbul | + EthereumHardfork::MuirGlacier | + EthereumHardfork::Berlin | + EthereumHardfork::London | + EthereumHardfork::ArrowGlacier | + EthereumHardfork::GrayGlacier | + EthereumHardfork::Paris | + EthereumHardfork::Shanghai => Some(2106456), + EthereumHardfork::Cancun => Some(6383256), + _ => None, + }, + |fork| match fork { + Self::Bedrock | Self::Regolith => Some(0), + Self::Canyon => Some(2106456), + Self::Ecotone => Some(6383256), + Self::Fjord => Some(10615056), + }, + ) + } + + /// Retrieves the activation block for the specified hardfork on the Base mainnet. 
+ pub fn base_mainnet_activation_block(fork: H) -> Option { + match_hardfork( + fork, + |fork| match fork { + EthereumHardfork::Frontier | + EthereumHardfork::Homestead | + EthereumHardfork::Dao | + EthereumHardfork::Tangerine | + EthereumHardfork::SpuriousDragon | + EthereumHardfork::Byzantium | + EthereumHardfork::Constantinople | + EthereumHardfork::Petersburg | + EthereumHardfork::Istanbul | + EthereumHardfork::MuirGlacier | + EthereumHardfork::Berlin | + EthereumHardfork::London | + EthereumHardfork::ArrowGlacier | + EthereumHardfork::GrayGlacier | + EthereumHardfork::Paris | + EthereumHardfork::Shanghai => Some(9101527), + EthereumHardfork::Cancun => Some(11188936), + _ => None, + }, + |fork| match fork { + Self::Bedrock | Self::Regolith => Some(0), + Self::Canyon => Some(9101527), + Self::Ecotone => Some(11188936), + _ => None, + }, + ) + } + + /// Retrieves the activation timestamp for the specified hardfork on the Base Sepolia testnet. + pub fn base_sepolia_activation_timestamp(fork: H) -> Option { + match_hardfork( + fork, + |fork| match fork { + EthereumHardfork::Frontier | + EthereumHardfork::Homestead | + EthereumHardfork::Dao | + EthereumHardfork::Tangerine | + EthereumHardfork::SpuriousDragon | + EthereumHardfork::Byzantium | + EthereumHardfork::Constantinople | + EthereumHardfork::Petersburg | + EthereumHardfork::Istanbul | + EthereumHardfork::MuirGlacier | + EthereumHardfork::Berlin | + EthereumHardfork::London | + EthereumHardfork::ArrowGlacier | + EthereumHardfork::GrayGlacier | + EthereumHardfork::Paris | + EthereumHardfork::Shanghai => Some(1699981200), + EthereumHardfork::Cancun => Some(1708534800), + _ => None, + }, + |fork| match fork { + Self::Bedrock | Self::Regolith => Some(1695768288), + Self::Canyon => Some(1699981200), + Self::Ecotone => Some(1708534800), + Self::Fjord => Some(1716998400), + }, + ) + } + + /// Retrieves the activation timestamp for the specified hardfork on the Base mainnet. 
+ pub fn base_mainnet_activation_timestamp(fork: H) -> Option { + match_hardfork( + fork, + |fork| match fork { + EthereumHardfork::Frontier | + EthereumHardfork::Homestead | + EthereumHardfork::Dao | + EthereumHardfork::Tangerine | + EthereumHardfork::SpuriousDragon | + EthereumHardfork::Byzantium | + EthereumHardfork::Constantinople | + EthereumHardfork::Petersburg | + EthereumHardfork::Istanbul | + EthereumHardfork::MuirGlacier | + EthereumHardfork::Berlin | + EthereumHardfork::London | + EthereumHardfork::ArrowGlacier | + EthereumHardfork::GrayGlacier | + EthereumHardfork::Paris | + EthereumHardfork::Shanghai => Some(1704992401), + EthereumHardfork::Cancun => Some(1710374401), + _ => None, + }, + |fork| match fork { + Self::Bedrock | Self::Regolith => Some(1686789347), + Self::Canyon => Some(1704992401), + Self::Ecotone => Some(1710374401), + Self::Fjord => Some(1720627201), + }, + ) + } + + /// Optimism mainnet list of hardforks. + pub fn op_mainnet() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(3950000)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(105235063)), + (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(105235063)), + (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(105235063)), + ( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { fork_block: 
Some(105235063), total_difficulty: U256::ZERO }, + ), + (Self::Bedrock.boxed(), ForkCondition::Block(105235063)), + (Self::Regolith.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1704992401)), + (Self::Canyon.boxed(), ForkCondition::Timestamp(1704992401)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1710374401)), + (Self::Ecotone.boxed(), ForkCondition::Timestamp(1710374401)), + (Self::Fjord.boxed(), ForkCondition::Timestamp(1720627201)), + ]) + } + + /// Optimism sepolia list of hardforks. + pub fn op_sepolia() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)), + ( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, + ), + (Self::Bedrock.boxed(), ForkCondition::Block(0)), + (Self::Regolith.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1699981200)), + (Self::Canyon.boxed(), ForkCondition::Timestamp(1699981200)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1708534800)), + (Self::Ecotone.boxed(), 
ForkCondition::Timestamp(1708534800)), + (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), + ]) + } + + /// Base sepolia list of hardforks. + pub fn base_sepolia() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)), + ( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, + ), + (Self::Bedrock.boxed(), ForkCondition::Block(0)), + (Self::Regolith.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1699981200)), + (Self::Canyon.boxed(), ForkCondition::Timestamp(1699981200)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1708534800)), + (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), + (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), + ]) + } + + /// Base mainnet list of hardforks. 
+ pub fn base_mainnet() -> ChainHardforks {
+ ChainHardforks::new(vec![
+ (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)),
+ (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)),
+ (
+ EthereumHardfork::Paris.boxed(),
+ ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO },
+ ),
+ (Self::Bedrock.boxed(), ForkCondition::Block(0)),
+ (Self::Regolith.boxed(), ForkCondition::Timestamp(0)),
+ (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1704992401)),
+ (Self::Canyon.boxed(), ForkCondition::Timestamp(1704992401)),
+ (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1710374401)),
+ (Self::Ecotone.boxed(), ForkCondition::Timestamp(1710374401)),
+ (Self::Fjord.boxed(), ForkCondition::Timestamp(1720627201)),
+ ])
+ }
+}
+
+/// Match helper method since it's not possible to match on `dyn Hardfork`
+fn match_hardfork<H, HF, OHF>(fork: H, hardfork_fn: HF, optimism_hardfork_fn: OHF) -> Option<u64>
+where
+ H: Hardfork,
+ HF: Fn(&EthereumHardfork) -> Option<u64>,
+ OHF: Fn(&OptimismHardfork) -> Option<u64>,
+{
+ let fork: &dyn Any = &fork;
+ if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
+ return hardfork_fn(fork)
+ }
+ fork.downcast_ref::<OptimismHardfork>().and_then(optimism_hardfork_fn)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
#[test] + fn test_match_hardfork() { + assert_eq!( + OptimismHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun), + Some(11188936) + ); + assert_eq!( + OptimismHardfork::base_mainnet_activation_block(OptimismHardfork::Canyon), + Some(9101527) + ); + } +} diff --git a/crates/ethereum-forks/src/hardforks/ethereum.rs b/crates/ethereum-forks/src/hardforks/ethereum.rs new file mode 100644 index 000000000000..3b4c860ad394 --- /dev/null +++ b/crates/ethereum-forks/src/hardforks/ethereum.rs @@ -0,0 +1,56 @@ +use crate::{ + hardforks::{ChainHardforks, Hardforks}, + EthereumHardfork, ForkCondition, +}; + +/// Helper methods for Ethereum forks. +pub trait EthereumHardforks: Hardforks { + /// Convenience method to check if [`EthereumHardfork::Shanghai`] is active at a given + /// timestamp. + fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(EthereumHardfork::Shanghai, timestamp) + } + + /// Convenience method to check if [`EthereumHardfork::Cancun`] is active at a given timestamp. + fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(EthereumHardfork::Cancun, timestamp) + } + + /// Convenience method to check if [`EthereumHardfork::Prague`] is active at a given timestamp. + fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp) + } + + /// Convenience method to check if [`EthereumHardfork::Byzantium`] is active at a given block + /// number. + fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { + self.fork(EthereumHardfork::Byzantium).active_at_block(block_number) + } + + /// Convenience method to check if [`EthereumHardfork::SpuriousDragon`] is active at a given + /// block number. 
+ fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool {
+ self.fork(EthereumHardfork::SpuriousDragon).active_at_block(block_number)
+ }
+
+ /// Convenience method to check if [`EthereumHardfork::Homestead`] is active at a given block
+ /// number.
+ fn is_homestead_active_at_block(&self, block_number: u64) -> bool {
+ self.fork(EthereumHardfork::Homestead).active_at_block(block_number)
+ }
+
+ /// The Paris hardfork (merge) is activated via block number. If we have knowledge of the block,
+ /// this function will return true if the block number is greater than or equal to the Paris
+ /// (merge) block.
+ fn is_paris_active_at_block(&self, block_number: u64) -> Option<bool> {
+ match self.fork(EthereumHardfork::Paris) {
+ ForkCondition::Block(paris_block) => Some(block_number >= paris_block),
+ ForkCondition::TTD { fork_block, .. } => {
+ fork_block.map(|paris_block| block_number >= paris_block)
+ }
+ _ => None,
+ }
+ }
+}
+
+impl EthereumHardforks for ChainHardforks {} diff --git a/crates/ethereum-forks/src/hardforks/mod.rs b/crates/ethereum-forks/src/hardforks/mod.rs new file mode 100644 index 000000000000..121a189e0152 --- /dev/null +++ b/crates/ethereum-forks/src/hardforks/mod.rs @@ -0,0 +1,131 @@ +/// Ethereum helper methods
+mod ethereum;
+pub use ethereum::EthereumHardforks;
+
+/// Optimism helper methods
+mod optimism;
+pub use optimism::OptimismHardforks;
+
+use crate::{ForkCondition, Hardfork};
+use rustc_hash::FxHashMap;
+
+/// Generic trait over a set of ordered hardforks
+pub trait Hardforks: Default + Clone {
+ /// Retrieves [`ForkCondition`] from `fork`. If `fork` is not present, returns
+ /// [`ForkCondition::Never`].
+ fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition;
+
+ /// Get an iterator of all hardforks with their respective activation conditions.
+ fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)>;
+
+ /// Convenience method to check if a fork is active at a given timestamp.
+ fn is_fork_active_at_timestamp<H: Hardfork>(&self, fork: H, timestamp: u64) -> bool {
+ self.fork(fork).active_at_timestamp(timestamp)
+ }
+
+ /// Convenience method to check if a fork is active at a given block number.
+ fn is_fork_active_at_block<H: Hardfork>(&self, fork: H, block_number: u64) -> bool {
+ self.fork(fork).active_at_block(block_number)
+ }
+}
+
+/// Ordered list of a chain hardforks that implement [`Hardfork`].
+#[derive(Default, Clone, PartialEq, Eq)]
+pub struct ChainHardforks {
+ forks: Vec<(Box<dyn Hardfork>, ForkCondition)>,
+ map: FxHashMap<&'static str, ForkCondition>,
+}
+
+impl ChainHardforks {
+ /// Creates a new [`ChainHardforks`] from a list which **must be ordered** by activation.
+ ///
+ /// Equivalent Ethereum hardforks **must be included** as well.
+ pub fn new(forks: Vec<(Box<dyn Hardfork>, ForkCondition)>) -> Self {
+ let map = forks.iter().map(|(fork, condition)| (fork.name(), *condition)).collect();
+
+ Self { forks, map }
+ }
+
+ /// Total number of hardforks.
+ pub fn len(&self) -> usize {
+ self.forks.len()
+ }
+
+ /// Checks if the fork list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.forks.is_empty()
+ }
+
+ /// Retrieves [`ForkCondition`] from `fork`. If `fork` is not present, returns
+ /// [`ForkCondition::Never`].
+ pub fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition {
+ self.get(fork).unwrap_or(ForkCondition::Never)
+ }
+
+ /// Retrieves [`ForkCondition`] from `fork` if it exists, otherwise `None`.
+ pub fn get<H: Hardfork>(&self, fork: H) -> Option<ForkCondition> {
+ self.map.get(fork.name()).copied()
+ }
+
+ /// Get an iterator of all hardforks with their respective activation conditions.
+ pub fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> {
+ self.forks.iter().map(|(f, b)| (&**f, *b))
+ }
+
+ /// Get last hardfork from the list.
+ pub fn last(&self) -> Option<(Box<dyn Hardfork>, ForkCondition)> {
+ self.forks.last().map(|(f, b)| (f.clone(), *b))
+ }
+
+ /// Convenience method to check if a fork is active at a given timestamp.
+ pub fn is_fork_active_at_timestamp<H: Hardfork>(&self, fork: H, timestamp: u64) -> bool {
+ self.fork(fork).active_at_timestamp(timestamp)
+ }
+
+ /// Convenience method to check if a fork is active at a given block number.
+ pub fn is_fork_active_at_block<H: Hardfork>(&self, fork: H, block_number: u64) -> bool {
+ self.fork(fork).active_at_block(block_number)
+ }
+
+ /// Inserts `fork` into list, updating with a new [`ForkCondition`] if it already exists.
+ pub fn insert<H: Hardfork>(&mut self, fork: H, condition: ForkCondition) {
+ match self.map.entry(fork.name()) {
+ std::collections::hash_map::Entry::Occupied(mut entry) => {
+ *entry.get_mut() = condition;
+ if let Some((_, inner)) =
+ self.forks.iter_mut().find(|(inner, _)| inner.name() == fork.name())
+ {
+ *inner = condition;
+ }
+ }
+ std::collections::hash_map::Entry::Vacant(entry) => {
+ entry.insert(condition);
+ self.forks.push((Box::new(fork), condition));
+ }
+ }
+ }
+
+ /// Removes `fork` from list.
+ pub fn remove<H: Hardfork>(&mut self, fork: H) {
+ self.forks.retain(|(inner_fork, _)| inner_fork.name() != fork.name());
+ self.map.remove(fork.name());
+ }
+}
+
+impl Hardforks for ChainHardforks {
+ fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition {
+ self.fork(fork)
+ }
+
+ fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> {
+ self.forks_iter()
+ }
+}
+
+impl core::fmt::Debug for ChainHardforks {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ f.debug_struct("ChainHardforks")
+ .field("0", &self.forks_iter().map(|(hf, cond)| (hf.name(), cond)).collect::<Vec<_>>())
+ .finish()
+ }
+} diff --git a/crates/ethereum-forks/src/hardforks/optimism.rs b/crates/ethereum-forks/src/hardforks/optimism.rs new file mode 100644 index 000000000000..39b2bf4ab454 --- /dev/null +++ b/crates/ethereum-forks/src/hardforks/optimism.rs @@ -0,0 +1,12 @@ +use crate::{ChainHardforks, EthereumHardforks, OptimismHardfork};
+
+/// Extends [`crate::EthereumHardforks`] with optimism helper methods.
+pub trait OptimismHardforks: EthereumHardforks { + /// Convenience method to check if [`OptimismHardfork::Bedrock`] is active at a given block + /// number. + fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { + self.fork(OptimismHardfork::Bedrock).active_at_block(block_number) + } +} + +impl OptimismHardforks for ChainHardforks {} diff --git a/crates/ethereum-forks/src/head.rs b/crates/ethereum-forks/src/head.rs index 2cf29cca90e1..bd05cc3a772e 100644 --- a/crates/ethereum-forks/src/head.rs +++ b/crates/ethereum-forks/src/head.rs @@ -1,7 +1,7 @@ use alloy_primitives::{BlockNumber, B256, U256}; +use core::fmt; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::fmt; /// Describes the current head block. /// diff --git a/crates/ethereum-forks/src/lib.rs b/crates/ethereum-forks/src/lib.rs index 6dbec7c38d7c..51e619f4db0d 100644 --- a/crates/ethereum-forks/src/lib.rs +++ b/crates/ethereum-forks/src/lib.rs @@ -12,19 +12,28 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(not(feature = "std"))] +extern crate alloc; + +mod display; +mod forkcondition; mod forkid; mod hardfork; +mod hardforks; mod head; pub use forkid::{ EnrForkIdEntry, ForkFilter, ForkFilterKey, ForkHash, ForkId, ForkTransition, ValidationError, }; -pub use hardfork::Hardfork; +pub use hardfork::{EthereumHardfork, Hardfork, OptimismHardfork, DEV_HARDFORKS}; pub use head::Head; +pub use display::DisplayHardforks; +pub use forkcondition::ForkCondition; +pub use hardforks::*; + #[cfg(any(test, feature = "arbitrary"))] pub use arbitrary; diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml new file mode 100644 index 
000000000000..18b5f9a47066 --- /dev/null +++ b/crates/ethereum/cli/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "reth-ethereum-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] diff --git a/crates/ethereum/cli/src/lib.rs b/crates/ethereum/cli/src/lib.rs new file mode 100644 index 000000000000..c55b2ab389d0 --- /dev/null +++ b/crates/ethereum/cli/src/lib.rs @@ -0,0 +1,9 @@ +//! Reth CLI implementation. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index f1ee25085ef2..25c865a2bc18 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -12,11 +12,9 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-consensus-common.workspace = true reth-primitives.workspace = true reth-consensus.workspace = true tracing.workspace = true - -[features] -optimism = ["reth-primitives/optimism"] diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 3d245b94b114..66cd7bac953c 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,12 +8,16 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ - validate_block_pre_execution, validate_header_extradata, 
validate_header_standalone, + validate_4844_header_standalone, validate_against_parent_4844, + validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, + validate_against_parent_timestamp, validate_block_pre_execution, validate_header_base_fee, + validate_header_extradata, validate_header_gas, }; use reth_primitives::{ - BlockWithSenders, Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, + constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; @@ -32,14 +36,90 @@ pub struct EthBeaconConsensus { impl EthBeaconConsensus { /// Create a new instance of [`EthBeaconConsensus`] - pub fn new(chain_spec: Arc) -> Self { + pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } + + /// Checks the gas limit for consistency between parent and self headers. + /// + /// The maximum allowable difference between self and parent gas limits is determined by the + /// parent's gas limit divided by the elasticity multiplier (1024). + fn validate_against_parent_gas_limit( + &self, + header: &SealedHeader, + parent: &SealedHeader, + ) -> Result<(), ConsensusError> { + // Determine the parent gas limit, considering elasticity multiplier on the London fork. + let parent_gas_limit = + if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { + parent.gas_limit * + self.chain_spec + .base_fee_params_at_timestamp(header.timestamp) + .elasticity_multiplier as u64 + } else { + parent.gas_limit + }; + + // Check for an increase in gas limit beyond the allowed threshold. + if header.gas_limit > parent_gas_limit { + if header.gas_limit - parent_gas_limit >= parent_gas_limit / 1024 { + return Err(ConsensusError::GasLimitInvalidIncrease { + parent_gas_limit, + child_gas_limit: header.gas_limit, + }) + } + } + // Check for a decrease in gas limit beyond the allowed threshold. 
+ else if parent_gas_limit - header.gas_limit >= parent_gas_limit / 1024 { + return Err(ConsensusError::GasLimitInvalidDecrease { + parent_gas_limit, + child_gas_limit: header.gas_limit, + }) + } + // Check if the self gas limit is below the minimum required limit. + else if header.gas_limit < MINIMUM_GAS_LIMIT { + return Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: header.gas_limit }) + } + + Ok(()) + } } impl Consensus for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_standalone(header, &self.chain_spec)?; + validate_header_gas(header)?; + validate_header_base_fee(header, &self.chain_spec)?; + + // EIP-4895: Beacon chain push withdrawals as operations + if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && + header.withdrawals_root.is_none() + { + return Err(ConsensusError::WithdrawalsRootMissing) + } else if !self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && + header.withdrawals_root.is_some() + { + return Err(ConsensusError::WithdrawalsRootUnexpected) + } + + // Ensures that EIP-4844 fields are valid once cancun is active. 
+ if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + validate_4844_header_standalone(header)?; + } else if header.blob_gas_used.is_some() { + return Err(ConsensusError::BlobGasUsedUnexpected) + } else if header.excess_blob_gas.is_some() { + return Err(ConsensusError::ExcessBlobGasUnexpected) + } else if header.parent_beacon_block_root.is_some() { + return Err(ConsensusError::ParentBeaconBlockRootUnexpected) + } + + if self.chain_spec.is_prague_active_at_timestamp(header.timestamp) { + if header.requests_root.is_none() { + return Err(ConsensusError::RequestsRootMissing) + } + } else if header.requests_root.is_some() { + return Err(ConsensusError::RequestsRootUnexpected) + } + Ok(()) } @@ -48,32 +128,37 @@ impl Consensus for EthBeaconConsensus { header: &SealedHeader, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - header.validate_against_parent(parent, &self.chain_spec).map_err(ConsensusError::from)?; + validate_against_parent_hash_number(header, parent)?; + + validate_against_parent_timestamp(header, parent)?; + + // TODO Check difficulty increment between parent and self + // Ace age did increment it by some formula that we need to follow. 
+ self.validate_against_parent_gas_limit(header, parent)?; + + validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + + // ensure that the blob gas fields for this block + if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + validate_against_parent_4844(header, parent)?; + } + Ok(()) } - #[allow(unused_assignments)] - #[allow(unused_mut)] fn validate_header_with_total_difficulty( &self, header: &Header, total_difficulty: U256, ) -> Result<(), ConsensusError> { - let mut is_post_merge = self + let is_post_merge = self .chain_spec - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(total_difficulty, header.difficulty); - #[cfg(feature = "optimism")] - { - // If OP-Stack then bedrock activation number determines when TTD (eth Merge) has been - // reached. - is_post_merge = self.chain_spec.is_bedrock_active_at_block(header.number); - } - if is_post_merge { - if !self.chain_spec.is_optimism() && !header.is_zero_difficulty() { - return Err(ConsensusError::TheMergeDifficultyIsNotZero); + if !header.is_zero_difficulty() { + return Err(ConsensusError::TheMergeDifficultyIsNotZero) } if header.nonce != 0 { @@ -113,10 +198,9 @@ impl Consensus for EthBeaconConsensus { }); } - // Goerli and early OP exception: - // * If the network is goerli pre-merge, ignore the extradata check, since we do not - // support clique. Same goes for OP blocks below Bedrock. - if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() { + // Early OP exception: + // * If the network is pre-Bedrock OP, ignore the extradata check. 
+ if !self.chain_spec.is_optimism() { validate_header_extradata(header)?; } } @@ -136,3 +220,97 @@ impl Consensus for EthBeaconConsensus { validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) } } + +#[cfg(test)] +mod tests { + use super::*; + use reth_chainspec::ChainSpecBuilder; + use reth_primitives::{proofs, B256}; + + fn header_with_gas_limit(gas_limit: u64) -> SealedHeader { + let header = Header { gas_limit, ..Default::default() }; + header.seal(B256::ZERO) + } + + #[test] + fn test_valid_gas_limit_increase() { + let parent = header_with_gas_limit(1024 * 10); + let child = header_with_gas_limit(parent.gas_limit + 5); + + assert_eq!( + EthBeaconConsensus::new(Arc::new(ChainSpec::default())) + .validate_against_parent_gas_limit(&child, &parent), + Ok(()) + ); + } + + #[test] + fn test_gas_limit_below_minimum() { + let parent = header_with_gas_limit(MINIMUM_GAS_LIMIT); + let child = header_with_gas_limit(MINIMUM_GAS_LIMIT - 1); + + assert_eq!( + EthBeaconConsensus::new(Arc::new(ChainSpec::default())) + .validate_against_parent_gas_limit(&child, &parent), + Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: child.gas_limit }) + ); + } + + #[test] + fn test_invalid_gas_limit_increase_exceeding_limit() { + let parent = header_with_gas_limit(1024 * 10); + let child = header_with_gas_limit(parent.gas_limit + parent.gas_limit / 1024 + 1); + + assert_eq!( + EthBeaconConsensus::new(Arc::new(ChainSpec::default())) + .validate_against_parent_gas_limit(&child, &parent), + Err(ConsensusError::GasLimitInvalidIncrease { + parent_gas_limit: parent.gas_limit, + child_gas_limit: child.gas_limit, + }) + ); + } + + #[test] + fn test_valid_gas_limit_decrease_within_limit() { + let parent = header_with_gas_limit(1024 * 10); + let child = header_with_gas_limit(parent.gas_limit - 5); + + assert_eq!( + EthBeaconConsensus::new(Arc::new(ChainSpec::default())) + .validate_against_parent_gas_limit(&child, &parent), + Ok(()) + ); + } + + #[test] 
+ fn test_invalid_gas_limit_decrease_exceeding_limit() { + let parent = header_with_gas_limit(1024 * 10); + let child = header_with_gas_limit(parent.gas_limit - parent.gas_limit / 1024 - 1); + + assert_eq!( + EthBeaconConsensus::new(Arc::new(ChainSpec::default())) + .validate_against_parent_gas_limit(&child, &parent), + Err(ConsensusError::GasLimitInvalidDecrease { + parent_gas_limit: parent.gas_limit, + child_gas_limit: child.gas_limit, + }) + ); + } + + #[test] + fn shanghai_block_zero_withdrawals() { + // ensures that if shanghai is activated, and we include a block with a withdrawals root, + // that the header is valid + let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); + + let header = Header { + base_fee_per_gas: Some(1337u64), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + ..Default::default() + } + .seal_slow(); + + assert_eq!(EthBeaconConsensus::new(chain_spec).validate_header(&header), Ok(())); + } +} diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 1a7c777075b8..6e3a6d56fcb0 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,7 +1,7 @@ +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ - gas_spent_by_transactions, BlockWithSenders, Bloom, ChainSpec, GotExpected, Receipt, Request, - B256, + gas_spent_by_transactions, BlockWithSenders, Bloom, GotExpected, Receipt, Request, B256, }; /// Validate a block with regard to execution results: @@ -14,6 +14,16 @@ pub fn validate_block_post_execution( receipts: &[Receipt], requests: &[Request], ) -> Result<(), ConsensusError> { + // Check if gas used matches the value set in header. 
+ let cumulative_gas_used = + receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); + if block.gas_used != cumulative_gas_used { + return Err(ConsensusError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: gas_spent_by_transactions(receipts), + }) + } + // Before Byzantium, receipts contained state root that would mean that expensive // operation as hashing that is required for state root got calculated in every // transaction This was replaced with is_success flag. @@ -27,16 +37,6 @@ pub fn validate_block_post_execution( } } - // Check if gas used matches the value set in header. - let cumulative_gas_used = - receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); - if block.gas_used != cumulative_gas_used { - return Err(ConsensusError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: gas_spent_by_transactions(receipts), - }); - } - // Validate that the header requests root matches the calculated requests root if chain_spec.is_prague_active_at_timestamp(block.timestamp) { let Some(header_requests_root) = block.header.requests_root else { diff --git a/crates/ethereum/engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml index c91e2086effd..8a1f25808937 100644 --- a/crates/ethereum/engine-primitives/Cargo.toml +++ b/crates/ethereum/engine-primitives/Cargo.toml @@ -12,6 +12,8 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true +reth-evm-ethereum.workspace = true reth-primitives.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 441f83f9ac8a..fe4a050fa41b 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -10,14 +10,13 @@ mod payload; pub use 
payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; - +use reth_chainspec::ChainSpec; use reth_engine_primitives::EngineTypes; use reth_payload_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, - PayloadOrAttributes, + PayloadOrAttributes, PayloadTypes, }; -use reth_primitives::ChainSpec; -use reth_rpc_types::{ +pub use reth_rpc_types::{ engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, PayloadAttributes as EthPayloadAttributes, @@ -30,10 +29,13 @@ use reth_rpc_types::{ #[non_exhaustive] pub struct EthEngineTypes; -impl EngineTypes for EthEngineTypes { +impl PayloadTypes for EthEngineTypes { + type BuiltPayload = EthBuiltPayload; type PayloadAttributes = EthPayloadAttributes; type PayloadBuilderAttributes = EthPayloadBuilderAttributes; - type BuiltPayload = EthBuiltPayload; +} + +impl EngineTypes for EthEngineTypes { type ExecutionPayloadV1 = ExecutionPayloadV1; type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index a802622002ab..f9fde7028e32 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -1,10 +1,12 @@ //! Contains types required for building a payload. 
use alloy_rlp::Encodable; +use reth_chainspec::ChainSpec; +use reth_evm_ethereum::revm_spec_by_timestamp_after_merge; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ - constants::EIP1559_INITIAL_BASE_FEE, revm::config::revm_spec_by_timestamp_after_merge, Address, - BlobTransactionSidecar, ChainSpec, Hardfork, Header, SealedBlock, Withdrawals, B256, U256, + constants::EIP1559_INITIAL_BASE_FEE, Address, BlobTransactionSidecar, EthereumHardfork, Header, + SealedBlock, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, @@ -39,7 +41,7 @@ pub struct EthBuiltPayload { impl EthBuiltPayload { /// Initializes the payload with the given initial block. - pub fn new(id: PayloadId, block: SealedBlock, fees: U256) -> Self { + pub const fn new(id: PayloadId, block: SealedBlock, fees: U256) -> Self { Self { id, block, fees, sidecars: Vec::new() } } @@ -265,7 +267,7 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { // If we are on the London fork boundary, we need to multiply the parent's gas limit by the // elasticity multiplier to get the new gas limit. 
- if chain_spec.fork(Hardfork::London).transitions_at_block(parent.number + 1) { + if chain_spec.fork(EthereumHardfork::London).transitions_at_block(parent.number + 1) { let elasticity_multiplier = chain_spec.base_fee_params_at_timestamp(self.timestamp()).elasticity_multiplier; diff --git a/crates/ethereum/engine/Cargo.toml b/crates/ethereum/engine/Cargo.toml new file mode 100644 index 000000000000..05fbc4386cde --- /dev/null +++ b/crates/ethereum/engine/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "reth-ethereum-engine" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-beacon-consensus.workspace = true +reth-chainspec.workspace = true +reth-db-api.workspace = true +reth-engine-tree.workspace = true +reth-ethereum-engine-primitives.workspace = true +reth-network-p2p.workspace = true +reth-stages-api.workspace = true +reth-tasks.workspace = true + +# async +futures.workspace = true +pin-project.workspace = true +tokio = { workspace = true, features = ["sync"] } +tokio-stream.workspace = true + +[dev-dependencies] +reth-engine-tree = { workspace = true, features = ["test-utils"] } diff --git a/crates/net/common/src/lib.rs b/crates/ethereum/engine/src/lib.rs similarity index 74% rename from crates/net/common/src/lib.rs rename to crates/ethereum/engine/src/lib.rs index 9706d36620b4..8cb60de5925b 100644 --- a/crates/net/common/src/lib.rs +++ b/crates/ethereum/engine/src/lib.rs @@ -1,4 +1,4 @@ -//! Shared types across `reth-net`. +//! Ethereum engine implementation. 
#![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -8,9 +8,5 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub mod ban_list; - -/// Traits related to tokio streams -pub mod stream; - -pub mod ratelimit; +/// Ethereum engine service. +pub mod service; diff --git a/crates/ethereum/engine/src/service.rs b/crates/ethereum/engine/src/service.rs new file mode 100644 index 000000000000..0abf352eeab7 --- /dev/null +++ b/crates/ethereum/engine/src/service.rs @@ -0,0 +1,138 @@ +use futures::{ready, StreamExt}; +use pin_project::pin_project; +use reth_beacon_consensus::{BeaconEngineMessage, EthBeaconConsensus}; +use reth_chainspec::ChainSpec; +use reth_db_api::database::Database; +use reth_engine_tree::{ + backfill::PipelineSync, + chain::ChainOrchestrator, + download::BasicBlockDownloader, + engine::{EngineApiEvent, EngineApiRequestHandler, EngineHandler, FromEngine}, +}; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_network_p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; +use reth_stages_api::Pipeline; +use reth_tasks::TaskSpawner; +use std::{ + future::Future, + pin::Pin, + sync::{mpsc::Sender, Arc}, + task::{Context, Poll}, +}; +use tokio::sync::mpsc::UnboundedReceiver; +use tokio_stream::wrappers::UnboundedReceiverStream; + +/// Alias for Ethereum chain orchestrator. +type EthServiceType = ChainOrchestrator< + EngineHandler< + EngineApiRequestHandler, + UnboundedReceiverStream>, + BasicBlockDownloader, + >, + PipelineSync, +>; + +/// The type that drives the Ethereum chain forward and communicates progress. 
+#[pin_project] +#[allow(missing_debug_implementations)] +pub struct EthService +where + DB: Database + 'static, + Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, +{ + orchestrator: EthServiceType, +} + +impl EthService +where + DB: Database + 'static, + Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, +{ + /// Constructor for `EthService`. + pub fn new( + chain_spec: Arc, + client: Client, + to_tree: Sender>>, + from_tree: UnboundedReceiver, + incoming_requests: UnboundedReceiverStream>, + pipeline: Pipeline, + pipeline_task_spawner: Box, + ) -> Self { + let consensus = Arc::new(EthBeaconConsensus::new(chain_spec)); + let downloader = BasicBlockDownloader::new(client, consensus); + + let engine_handler = EngineApiRequestHandler::new(to_tree, from_tree); + let handler = EngineHandler::new(engine_handler, downloader, incoming_requests); + + let backfill_sync = PipelineSync::new(pipeline, pipeline_task_spawner); + + Self { orchestrator: ChainOrchestrator::new(handler, backfill_sync) } + } +} + +impl Future for EthService +where + DB: Database + 'static, + Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, +{ + type Output = Result<(), EthServiceError>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + // Call poll on the inner orchestrator. + let mut orchestrator = self.project().orchestrator; + loop { + match ready!(StreamExt::poll_next_unpin(&mut orchestrator, cx)) { + Some(_event) => continue, + None => return Poll::Ready(Ok(())), + } + } + } +} + +/// Potential error returned by `EthService`. 
+#[derive(Debug)] +pub struct EthServiceError {} + +#[cfg(test)] +mod tests { + use super::*; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_engine_tree::test_utils::TestPipelineBuilder; + use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_network_p2p::test_utils::TestFullBlockClient; + use reth_tasks::TokioTaskExecutor; + use std::sync::{mpsc::channel, Arc}; + use tokio::sync::mpsc::unbounded_channel; + + #[test] + fn eth_chain_orchestrator_build() { + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .paris_activated() + .build(), + ); + + let client = TestFullBlockClient::default(); + + let (_tx, rx) = unbounded_channel::>(); + let incoming_requests = UnboundedReceiverStream::new(rx); + + let pipeline = TestPipelineBuilder::new().build(chain_spec.clone()); + let pipeline_task_spawner = Box::::default(); + + let (to_tree_tx, _to_tree_rx) = channel(); + let (_from_tree_tx, from_tree_rx) = unbounded_channel(); + + let _eth_chain_orchestrator = EthService::new( + chain_spec, + client, + to_tree_tx, + from_tree_rx, + incoming_requests, + pipeline, + pipeline_task_spawner, + ); + } +} diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 410750c80b6f..7ea2e4b587c9 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -12,6 +12,8 @@ workspace = true [dependencies] # Reth +reth-chainspec.workspace = true +reth-ethereum-forks.workspace = true reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true @@ -32,3 +34,6 @@ reth-revm = { workspace = true, features = ["test-utils"] } secp256k1.workspace = true serde_json.workspace = true +[features] +default = ["std"] +std = [] \ No newline at end of file diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs new file mode 100644 index 000000000000..77082b1f7d60 --- /dev/null +++ b/crates/ethereum/evm/src/config.rs @@ 
-0,0 +1,220 @@ +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_ethereum_forks::{EthereumHardfork, Head}; + +/// Returns the spec id at the given timestamp. +/// +/// Note: This is only intended to be used after the merge, when hardforks are activated by +/// timestamp. +pub fn revm_spec_by_timestamp_after_merge( + chain_spec: &ChainSpec, + timestamp: u64, +) -> revm_primitives::SpecId { + if chain_spec.is_prague_active_at_timestamp(timestamp) { + revm_primitives::PRAGUE + } else if chain_spec.is_cancun_active_at_timestamp(timestamp) { + revm_primitives::CANCUN + } else if chain_spec.is_shanghai_active_at_timestamp(timestamp) { + revm_primitives::SHANGHAI + } else { + revm_primitives::MERGE + } +} + +/// return `revm_spec` from spec configuration. +pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecId { + if chain_spec.fork(EthereumHardfork::Prague).active_at_head(block) { + revm_primitives::PRAGUE + } else if chain_spec.fork(EthereumHardfork::Cancun).active_at_head(block) { + revm_primitives::CANCUN + } else if chain_spec.fork(EthereumHardfork::Shanghai).active_at_head(block) { + revm_primitives::SHANGHAI + } else if chain_spec.fork(EthereumHardfork::Paris).active_at_head(block) { + revm_primitives::MERGE + } else if chain_spec.fork(EthereumHardfork::London).active_at_head(block) { + revm_primitives::LONDON + } else if chain_spec.fork(EthereumHardfork::Berlin).active_at_head(block) { + revm_primitives::BERLIN + } else if chain_spec.fork(EthereumHardfork::Istanbul).active_at_head(block) { + revm_primitives::ISTANBUL + } else if chain_spec.fork(EthereumHardfork::Petersburg).active_at_head(block) { + revm_primitives::PETERSBURG + } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_head(block) { + revm_primitives::BYZANTIUM + } else if chain_spec.fork(EthereumHardfork::SpuriousDragon).active_at_head(block) { + revm_primitives::SPURIOUS_DRAGON + } else if 
chain_spec.fork(EthereumHardfork::Tangerine).active_at_head(block) { + revm_primitives::TANGERINE + } else if chain_spec.fork(EthereumHardfork::Homestead).active_at_head(block) { + revm_primitives::HOMESTEAD + } else if chain_spec.fork(EthereumHardfork::Frontier).active_at_head(block) { + revm_primitives::FRONTIER + } else { + panic!( + "invalid hardfork chainspec: expected at least one hardfork, got {:?}", + chain_spec.hardforks + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::U256; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; + + #[test] + fn test_revm_spec_by_timestamp_after_merge() { + assert_eq!( + revm_spec_by_timestamp_after_merge( + &ChainSpecBuilder::mainnet().cancun_activated().build(), + 0 + ), + revm_primitives::CANCUN + ); + assert_eq!( + revm_spec_by_timestamp_after_merge( + &ChainSpecBuilder::mainnet().shanghai_activated().build(), + 0 + ), + revm_primitives::SHANGHAI + ); + assert_eq!( + revm_spec_by_timestamp_after_merge(&ChainSpecBuilder::mainnet().build(), 0), + revm_primitives::MERGE + ); + } + + #[test] + fn test_to_revm_spec() { + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().cancun_activated().build(), &Head::default()), + revm_primitives::CANCUN + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().shanghai_activated().build(), &Head::default()), + revm_primitives::SHANGHAI + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().paris_activated().build(), &Head::default()), + revm_primitives::MERGE + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().london_activated().build(), &Head::default()), + revm_primitives::LONDON + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().berlin_activated().build(), &Head::default()), + revm_primitives::BERLIN + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().istanbul_activated().build(), &Head::default()), + revm_primitives::ISTANBUL + ); + assert_eq!( + revm_spec( + &ChainSpecBuilder::mainnet().petersburg_activated().build(), + 
&Head::default() + ), + revm_primitives::PETERSBURG + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().byzantium_activated().build(), &Head::default()), + revm_primitives::BYZANTIUM + ); + assert_eq!( + revm_spec( + &ChainSpecBuilder::mainnet().spurious_dragon_activated().build(), + &Head::default() + ), + revm_primitives::SPURIOUS_DRAGON + ); + assert_eq!( + revm_spec( + &ChainSpecBuilder::mainnet().tangerine_whistle_activated().build(), + &Head::default() + ), + revm_primitives::TANGERINE + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().homestead_activated().build(), &Head::default()), + revm_primitives::HOMESTEAD + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().frontier_activated().build(), &Head::default()), + revm_primitives::FRONTIER + ); + } + + #[test] + fn test_eth_spec() { + assert_eq!( + revm_spec(&MAINNET, &Head { timestamp: 1710338135, ..Default::default() }), + revm_primitives::CANCUN + ); + assert_eq!( + revm_spec(&MAINNET, &Head { timestamp: 1681338455, ..Default::default() }), + revm_primitives::SHANGHAI + ); + + assert_eq!( + revm_spec( + &MAINNET, + &Head { + total_difficulty: U256::from(58_750_000_000_000_000_000_010_u128), + difficulty: U256::from(10_u128), + ..Default::default() + } + ), + revm_primitives::MERGE + ); + // TTD trumps the block number + assert_eq!( + revm_spec( + &MAINNET, + &Head { + number: 15537394 - 10, + total_difficulty: U256::from(58_750_000_000_000_000_000_010_u128), + difficulty: U256::from(10_u128), + ..Default::default() + } + ), + revm_primitives::MERGE + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 15537394 - 10, ..Default::default() }), + revm_primitives::LONDON + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 12244000 + 10, ..Default::default() }), + revm_primitives::BERLIN + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 12244000 - 10, ..Default::default() }), + revm_primitives::ISTANBUL + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 7280000 + 10, 
..Default::default() }), + revm_primitives::PETERSBURG + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 7280000 - 10, ..Default::default() }), + revm_primitives::BYZANTIUM + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 2675000 + 10, ..Default::default() }), + revm_primitives::SPURIOUS_DRAGON + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 2675000 - 10, ..Default::default() }), + revm_primitives::TANGERINE + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 1150000 + 10, ..Default::default() }), + revm_primitives::HOMESTEAD + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 1150000 - 10, ..Default::default() }), + revm_primitives::FRONTIER + ); + } +} diff --git a/crates/ethereum/evm/src/eip6110.rs b/crates/ethereum/evm/src/eip6110.rs index 074cd1f0ba5b..722c38da76dc 100644 --- a/crates/ethereum/evm/src/eip6110.rs +++ b/crates/ethereum/evm/src/eip6110.rs @@ -1,10 +1,14 @@ //! EIP-6110 deposit requests parsing use alloy_eips::eip6110::{DepositRequest, MAINNET_DEPOSIT_CONTRACT_ADDRESS}; use alloy_sol_types::{sol, SolEvent}; +use reth_chainspec::ChainSpec; use reth_evm::execute::BlockValidationError; -use reth_primitives::{ChainSpec, Receipt, Request}; +use reth_primitives::{Receipt, Request}; use revm_primitives::Log; +#[cfg(not(feature = "std"))] +use alloc::{string::ToString, vec::Vec}; + sol! 
{ #[allow(missing_docs)] event DepositEvent( @@ -85,7 +89,8 @@ fn parse_deposit_from_log(log: &Log) -> DepositRequest { #[cfg(test)] mod tests { use super::*; - use reth_primitives::{TxType, MAINNET}; + use reth_chainspec::MAINNET; + use reth_primitives::TxType; #[test] fn test_parse_deposit_from_log() { diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 8b2f46460353..1cf830e63fbb 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -4,35 +4,37 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; +use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, + system_calls::{ + apply_beacon_root_contract_call, apply_consolidation_requests_contract_call, + apply_withdrawal_requests_contract_call, + }, ConfigureEvm, }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, Receipt, Request, Withdrawals, - MAINNET, U256, + BlockNumber, BlockWithSenders, EthereumHardfork, Header, Receipt, Request, U256, }; use reth_prune_types::PruneModes; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - state_change::{ - apply_beacon_root_contract_call, apply_blockhashes_update, - apply_withdrawal_requests_contract_call, post_block_balance_increments, - }, + state_change::{apply_blockhashes_update, post_block_balance_increments}, Evm, State, }; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, ResultAndState, }; -use std::sync::Arc; +#[cfg(feature = "std")] +use 
std::{fmt::Display, sync::Arc, vec, vec::Vec}; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] pub struct EthExecutorProvider { @@ -54,7 +56,7 @@ impl EthExecutorProvider { impl EthExecutorProvider { /// Creates a new executor provider. - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { chain_spec, evm_config } } } @@ -65,7 +67,7 @@ where { fn eth_executor(&self, db: DB) -> EthBlockExecutor where - DB: Database, + DB: Database>, { EthBlockExecutor::new( self.chain_spec.clone(), @@ -79,25 +81,27 @@ impl BlockExecutorProvider for EthExecutorProvider where EvmConfig: ConfigureEvm, { - type Executor> = EthBlockExecutor; + type Executor + Display>> = + EthBlockExecutor; - type BatchExecutor> = EthBatchExecutor; + type BatchExecutor + Display>> = + EthBatchExecutor; fn executor(&self, db: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { self.eth_executor(db) } - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { let executor = self.eth_executor(db); EthBatchExecutor { executor, - batch_record: BlockBatchRecord::new(prune_modes), + batch_record: BlockBatchRecord::default(), stats: BlockExecutorStats::default(), } } @@ -140,10 +144,12 @@ where mut evm: Evm<'_, Ext, &mut State>, ) -> Result where - DB: Database, + DB: Database, + DB::Error: Into + std::fmt::Display, { // apply pre execution changes apply_beacon_root_contract_call( + &self.evm_config, &self.chain_spec, block.timestamp, block.number, @@ -173,14 +179,21 @@ where .into()); } - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); + self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. 
let ResultAndState { result, state } = evm.transact().map_err(move |err| { + let new_err = match err { + EVMError::Transaction(e) => EVMError::Transaction(e), + EVMError::Header(e) => EVMError::Header(e), + EVMError::Database(e) => EVMError::Database(e.into()), + EVMError::Custom(e) => EVMError::Custom(e), + EVMError::Precompile(e) => EVMError::Precompile(e), + }; // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { hash: transaction.recalculate_hash(), - error: err.into(), + error: Box::new(new_err), } })?; evm.db_mut().commit(state); @@ -210,9 +223,14 @@ where crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, &receipts)?; // Collect all EIP-7685 requests - let withdrawal_requests = apply_withdrawal_requests_contract_call(&mut evm)?; + let withdrawal_requests = + apply_withdrawal_requests_contract_call(&self.evm_config, &mut evm)?; + + // Collect all EIP-7251 requests + let consolidation_requests = + apply_consolidation_requests_contract_call(&self.evm_config, &mut evm)?; - [deposit_requests, withdrawal_requests].concat() + [deposit_requests, withdrawal_requests, consolidation_requests].concat() } else { vec![] }; @@ -236,7 +254,7 @@ pub struct EthBlockExecutor { impl EthBlockExecutor { /// Creates a new Ethereum block executor. - pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { + pub const fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { Self { executor: EthEvmExecutor { chain_spec, evm_config }, state } } @@ -255,7 +273,7 @@ impl EthBlockExecutor { impl EthBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + Display>, { /// Configures a new evm configuration and block environment for the given block. 
/// @@ -265,7 +283,7 @@ where fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); let mut block_env = BlockEnv::default(); - EvmConfig::fill_cfg_and_block_env( + self.executor.evm_config.fill_cfg_and_block_env( &mut cfg, &mut block_env, self.chain_spec(), @@ -317,19 +335,11 @@ where block: &BlockWithSenders, total_difficulty: U256, ) -> Result<(), BlockExecutionError> { - let mut balance_increments = post_block_balance_increments( - self.chain_spec(), - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); + let mut balance_increments = + post_block_balance_increments(self.chain_spec(), block, total_difficulty); // Irregular state change at Ethereum DAO hardfork - if self.chain_spec().fork(Hardfork::Dao).transitions_at_block(block.number) { + if self.chain_spec().fork(EthereumHardfork::Dao).transitions_at_block(block.number) { // drain balances from hardcoded addresses. let drained_balance: u128 = self .state @@ -353,19 +363,17 @@ where impl Executor for EthBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = BlockExecutionOutput; type Error = BlockExecutionError; - /// Executes the block and commits the state changes. + /// Executes the block and commits the changes to the internal state. /// /// Returns the receipts of the transactions in the block. /// /// Returns an error if the block could not be executed or failed verification. - /// - /// State changes are committed to the database. 
fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; let EthExecuteOutput { receipts, requests, gas_used } = @@ -403,7 +411,7 @@ impl EthBatchExecutor { impl BatchExecutor for EthBatchExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = ExecutionOutcome; @@ -448,6 +456,10 @@ where self.batch_record.set_tip(tip); } + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.batch_record.set_prune_modes(prune_modes); + } + fn size_hint(&self) -> Option { Some(self.executor.state.bundle_state.size_hint()) } @@ -461,17 +473,16 @@ mod tests { eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, }; + use reth_chainspec::{ChainSpecBuilder, ForkCondition}; use reth_primitives::{ constants::{EMPTY_ROOT_HASH, ETH_TO_WEI}, - keccak256, public_key_to_address, Account, Block, ChainSpecBuilder, ForkCondition, - Transaction, TxKind, TxLegacy, B256, + keccak256, public_key_to_address, Account, Block, Transaction, TxKind, TxLegacy, B256, }; use reth_revm::{ - database::StateProviderDatabase, state_change::HISTORY_SERVE_WINDOW, - test_utils::StateProviderTest, TransitionState, + database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; - use revm_primitives::{b256, fixed_bytes, Bytes}; + use revm_primitives::{b256, fixed_bytes, Bytes, BLOCKHASH_SERVE_WINDOW}; use secp256k1::{Keypair, Secp256k1}; use std::collections::HashMap; @@ -527,7 +538,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) .build(), ); @@ -624,7 +635,7 @@ mod tests { let 
chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) .build(), ); @@ -632,7 +643,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail provider - .batch_executor(StateProviderDatabase::new(&db), PruneModes::none()) + .batch_executor(StateProviderDatabase::new(&db)) .execute_and_verify_one( ( &BlockWithSenders { @@ -667,7 +678,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) .build(), ); @@ -682,8 +693,7 @@ mod tests { ..Header::default() }; - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute an empty block with parent beacon block root, this should not fail executor @@ -720,14 +730,13 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)) .build(), ); let mut header = chain_spec.genesis_header(); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); @@ -807,15 +816,14 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + 
.with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) .build(), ); let provider = executor_provider(chain_spec); // execute header - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // Now execute a block with the fixed header, ensure that it does not fail executor @@ -877,13 +885,12 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Never) + .with_fork(EthereumHardfork::Prague, ForkCondition::Never) .build(), ); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // construct the header for block one let header = Header { timestamp: 1, number: 1, ..Header::default() }; @@ -930,14 +937,13 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) .build(), ); let header = chain_spec.genesis_header(); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute genesis block, this should not fail executor @@ -976,13 +982,13 @@ mod tests { #[test] fn eip_2935_fork_activation_within_window_bounds() { - let fork_activation_block = HISTORY_SERVE_WINDOW - 10; + let fork_activation_block = (BLOCKHASH_SERVE_WINDOW - 10) as u64; let db = create_state_provider_with_block_hashes(fork_activation_block); let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, 
ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) .build(), ); @@ -994,8 +1000,7 @@ mod tests { ..Header::default() }; let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute the fork activation block, this should not fail executor @@ -1039,19 +1044,18 @@ mod tests { #[test] fn eip_2935_fork_activation_outside_window_bounds() { - let fork_activation_block = HISTORY_SERVE_WINDOW + 256; + let fork_activation_block = (BLOCKHASH_SERVE_WINDOW + 256) as u64; let db = create_state_provider_with_block_hashes(fork_activation_block); let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) .build(), ); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); let header = Header { parent_hash: B256::random(), @@ -1090,7 +1094,7 @@ mod tests { .state_mut() .storage( HISTORY_STORAGE_ADDRESS, - U256::from(fork_activation_block % HISTORY_SERVE_WINDOW - 1) + U256::from(fork_activation_block % BLOCKHASH_SERVE_WINDOW as u64 - 1) ) .unwrap(), U256::ZERO @@ -1104,7 +1108,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) .build(), ); @@ -1113,8 +1117,7 @@ mod tests { let header_hash = header.hash_slow(); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut 
executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute the genesis block, this should not fail executor @@ -1241,7 +1244,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) .build(), ); @@ -1312,7 +1315,7 @@ mod tests { let request = requests.first().unwrap(); let withdrawal_request = request.as_withdrawal_request().unwrap(); assert_eq!(withdrawal_request.source_address, sender_address); - assert_eq!(withdrawal_request.validator_public_key, validator_public_key); + assert_eq!(withdrawal_request.validator_pubkey, validator_public_key); assert_eq!(withdrawal_request.amount, u64::from_be_bytes(withdrawal_amount.into())); } @@ -1322,7 +1325,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) .build(), ); diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index b6deece37134..cd8398ebe963 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -7,14 +7,19 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(not(feature = "std"))] +extern crate alloc; + +use reth_chainspec::{ChainSpec, Head}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm::{config::revm_spec, env::fill_tx_env}, - revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, ChainSpec, Head, Header, TransactionSigned, U256, -}; +use reth_primitives::{transaction::FillTxEnv, Address, Header, TransactionSigned, U256}; use reth_revm::{Database, EvmBuilder}; +use revm_primitives::{AnalysisKind, Bytes, CfgEnvWithHandlerCfg, 
Env, TxEnv, TxKind}; + +mod config; +pub use config::{revm_spec, revm_spec_by_timestamp_after_merge}; pub mod execute; @@ -30,19 +35,16 @@ pub mod eip6110; pub struct EthEvmConfig; impl ConfigureEvmEnv for EthEvmConfig { - fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { - fill_tx_env(tx_env, transaction, sender) - } - fn fill_cfg_env( + &self, cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ) { - let spec_id = revm_spec( + let spec_id = config::revm_spec( chain_spec, - Head { + &Head { number: header.number, timestamp: header.timestamp, difficulty: header.difficulty, @@ -56,6 +58,50 @@ impl ConfigureEvmEnv for EthEvmConfig { cfg_env.handler_cfg.spec_id = spec_id; } + + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + transaction.fill_tx_env(tx_env, sender); + } + + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ) { + #[allow(clippy::needless_update)] // side-effect of optimism fields + let tx = TxEnv { + caller, + transact_to: TxKind::Call(contract), + // Explicitly set nonce to None so revm does not do any nonce checks + nonce: None, + gas_limit: 30_000_000, + value: U256::ZERO, + data, + // Setting the gas price to zero enforces that no value is transferred as part of the + // call, and that the call will not count against the block's gas limit + gas_price: U256::ZERO, + // The chain ID check is not relevant here and is disabled if set to None + chain_id: None, + // Setting the gas priority fee to None ensures the effective gas price is derived from + // the `gas_price` field, which we need to be zero + gas_priority_fee: None, + access_list: Vec::new(), + // blob fields can be None for this tx + blob_hashes: Vec::new(), + max_fee_per_blob_gas: None, + // TODO remove this once this crate is no longer built with optimism + ..Default::default() + }; + env.tx 
= tx; + + // ensure the block gas limit is >= the tx + env.block.gas_limit = U256::from(env.tx.gas_limit); + + // disable the base fee check for this call by setting the base fee to zero + env.block.basefee = U256::ZERO; + } } impl ConfigureEvm for EthEvmConfig { @@ -72,7 +118,12 @@ impl ConfigureEvm for EthEvmConfig { #[cfg(test)] mod tests { use super::*; - use reth_primitives::revm_primitives::{BlockEnv, CfgEnv, SpecId}; + use reth_chainspec::ChainSpec; + use reth_primitives::{ + revm_primitives::{BlockEnv, CfgEnv, SpecId}, + Header, U256, + }; + use revm_primitives::CfgEnvWithHandlerCfg; #[test] #[ignore] @@ -83,7 +134,7 @@ mod tests { let chain_spec = ChainSpec::default(); let total_difficulty = U256::ZERO; - EthEvmConfig::fill_cfg_and_block_env( + EthEvmConfig::default().fill_cfg_and_block_env( &mut cfg_env, &mut block_env, &chain_spec, diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 072f91b28fd6..f053b35b911c 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -22,20 +22,28 @@ reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network.workspace = true reth-evm-ethereum.workspace = true +reth-consensus.workspace = true +reth-auto-seal-consensus.workspace = true +reth-beacon-consensus.workspace = true # misc eyre.workspace = true [dev-dependencies] reth.workspace = true +reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-primitives.workspace = true reth-e2e-test-utils.workspace = true +alloy-primitives.workspace = true +alloy-genesis.workspace = true futures.workspace = true tokio.workspace = true futures-util.workspace = true serde_json.workspace = true +[features] +default = [] +test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 96698ccfccd4..c3d8a9af55f4 100644 --- 
a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -1,15 +1,21 @@ //! Ethereum Node types config. use crate::{EthEngineTypes, EthEvmConfig}; +use reth_auto_seal_consensus::AutoSealConsensus; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_beacon_consensus::EthBeaconConsensus; +use reth_ethereum_engine_primitives::{ + EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, +}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network::NetworkHandle; use reth_node_builder::{ components::{ - ComponentsBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, + PayloadServiceBuilder, PoolBuilder, }, node::{FullNodeTypes, NodeTypes}, - BuilderContext, Node, PayloadBuilderConfig, + BuilderContext, Node, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::CanonStateSubscriptions; @@ -18,6 +24,7 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; +use std::sync::Arc; /// Type configuration for a regular Ethereum node. 
#[derive(Debug, Default, Clone, Copy)] @@ -32,9 +39,15 @@ impl EthereumNode { EthereumPayloadBuilder, EthereumNetworkBuilder, EthereumExecutorBuilder, + EthereumConsensusBuilder, > where - Node: FullNodeTypes, + Node: FullNodeTypes, + ::Engine: PayloadTypes< + BuiltPayload = EthBuiltPayload, + PayloadAttributes = EthPayloadAttributes, + PayloadBuilderAttributes = EthPayloadBuilderAttributes, + >, { ComponentsBuilder::default() .node_types::() @@ -42,6 +55,7 @@ impl EthereumNode { .payload(EthereumPayloadBuilder::default()) .network(EthereumNetworkBuilder::default()) .executor(EthereumExecutorBuilder::default()) + .consensus(EthereumConsensusBuilder::default()) } } @@ -60,6 +74,7 @@ where EthereumPayloadBuilder, EthereumNetworkBuilder, EthereumExecutorBuilder, + EthereumConsensusBuilder, >; fn components_builder(self) -> Self::ComponentsBuilder { @@ -171,8 +186,13 @@ pub struct EthereumPayloadBuilder; impl PayloadServiceBuilder for EthereumPayloadBuilder where - Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, + Node: FullNodeTypes, + ::Engine: PayloadTypes< + BuiltPayload = EthBuiltPayload, + PayloadAttributes = EthPayloadAttributes, + PayloadBuilderAttributes = EthPayloadBuilderAttributes, + >, { async fn spawn_payload_service( self, @@ -227,3 +247,24 @@ where Ok(handle) } } + +/// A basic ethereum consensus builder. 
+#[derive(Debug, Default, Clone, Copy)] +pub struct EthereumConsensusBuilder { + // TODO add closure to modify consensus +} + +impl ConsensusBuilder for EthereumConsensusBuilder +where + Node: FullNodeTypes, +{ + type Consensus = Arc; + + async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { + if ctx.is_dev() { + Ok(Arc::new(AutoSealConsensus::new(ctx.chain_spec()))) + } else { + Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) + } + } +} diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 3411d6db5218..9390b34f444a 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,16 +1,18 @@ use std::sync::Arc; +use alloy_genesis::Genesis; +use alloy_primitives::b256; use reth::{ args::RpcServerArgs, builder::{NodeBuilder, NodeConfig, NodeHandle}, rpc::types::engine::PayloadStatusEnum, tasks::TaskManager, }; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; use reth_node_ethereum::EthereumNode; -use reth_primitives::{b256, ChainSpecBuilder, Genesis, MAINNET}; use reth_transaction_pool::TransactionPool; use crate::utils::eth_payload_attributes; diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 4570a8c0e122..0e289cfd3b75 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,10 +1,14 @@ -use crate::utils::EthNode; +use std::sync::Arc; + +use alloy_genesis::Genesis; +use alloy_primitives::{b256, hex}; use futures::StreamExt; -use reth::rpc::eth::EthTransactions; +use reth::rpc::api::eth::helpers::EthTransactions; +use reth_chainspec::ChainSpec; use reth_e2e_test_utils::setup; -use reth_primitives::{b256, hex, ChainSpec, Genesis}; use reth_provider::CanonStateSubscriptions; -use std::sync::Arc; + +use crate::utils::EthNode; #[tokio::test] async fn 
can_run_dev_node() -> eyre::Result<()> { @@ -15,7 +19,7 @@ async fn can_run_dev_node() -> eyre::Result<()> { Ok(()) } -async fn assert_chain_advances(mut node: EthNode) { +async fn assert_chain_advances(node: EthNode) { let mut notifications = node.inner.provider.canonical_state_stream(); // submit tx through rpc diff --git a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index 6153a55d7f65..8e6938b47fe4 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -1,14 +1,15 @@ use crate::utils::eth_payload_attributes; +use alloy_genesis::Genesis; use reth::{ args::RpcServerArgs, builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, }; use reth_node_ethereum::EthereumNode; -use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; use std::sync::Arc; #[tokio::test] diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index c5d00b824c57..a40c1b3f4b4e 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -1,7 +1,7 @@ use crate::utils::eth_payload_attributes; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{setup, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; -use reth_primitives::{ChainSpecBuilder, MAINNET}; use std::sync::Arc; #[tokio::test] diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 2c1dc373b82e..001cf02ce017 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,8 +1,8 @@ +use alloy_primitives::{Address, B256}; use reth::rpc::types::engine::PayloadAttributes; use reth_e2e_test_utils::NodeHelperType; use reth_node_ethereum::EthereumNode; use 
reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, B256}; /// Ethereum Node Helper type pub(crate) type EthNode = NodeHelperType; diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index 883752a9681c..e41c8a407c33 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -18,6 +18,7 @@ reth-revm.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true +reth-execution-types.workspace = true reth-basic-payload-builder.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index fad345bc8854..e6d4b3ffc614 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -10,13 +10,18 @@ #![allow(clippy::useless_let_if_seq)] use reth_basic_payload_builder::{ - commit_withdrawals, is_better_payload, post_block_withdrawal_requests_contract_call, - pre_block_beacon_root_contract_call, BuildArguments, BuildOutcome, PayloadBuilder, + commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig, WithdrawalsOutcome, }; use reth_errors::RethError; -use reth_evm::ConfigureEvm; +use reth_evm::{ + system_calls::{ + post_block_withdrawal_requests_contract_call, pre_block_beacon_root_contract_call, + }, + ConfigureEvm, +}; use reth_evm_ethereum::{eip6110::parse_deposits_from_receipts, EthEvmConfig}; +use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{ error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes, }; @@ -26,10 +31,10 @@ use reth_primitives::{ }, eip4844::calculate_excess_blob_gas, proofs::{self, calculate_requests_root}, - revm::env::tx_env_with_recovered, - Block, Header, IntoRecoveredTransaction, Receipt, EMPTY_OMMER_ROOT_HASH, U256, + Block, EthereumHardforks, Header, IntoRecoveredTransaction, 
Receipt, EMPTY_OMMER_ROOT_HASH, + U256, }; -use reth_provider::{ExecutionOutcome, StateProviderFactory}; +use reth_provider::StateProviderFactory; use reth_revm::{database::StateProviderDatabase, state_change::apply_blockhashes_update}; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; use revm::{ @@ -113,11 +118,13 @@ where // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( &mut db, + &self.evm_config, &chain_spec, - block_number, &initialized_cfg, &initialized_block_env, - &attributes, + block_number, + attributes.timestamp, + attributes.parent_beacon_block_root, ) .map_err(|err| { warn!(target: "payload_builder", @@ -125,7 +132,7 @@ where %err, "failed to apply beacon root contract call for empty payload" ); - err + PayloadBuilderError::Internal(err.into()) })?; // apply eip-2935 blockhashes update @@ -188,22 +195,25 @@ where } // Calculate the requests and the requests root. - let (requests, requests_root) = - if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { - // We do not calculate the EIP-6110 deposit requests because there are no - // transactions in an empty payload. - let withdrawal_requests = post_block_withdrawal_requests_contract_call( - &mut db, - &initialized_cfg, - &initialized_block_env, - )?; - - let requests = withdrawal_requests; - let requests_root = calculate_requests_root(&requests); - (Some(requests.into()), Some(requests_root)) - } else { - (None, None) - }; + let (requests, requests_root) = if chain_spec + .is_prague_active_at_timestamp(attributes.timestamp) + { + // We do not calculate the EIP-6110 deposit requests because there are no + // transactions in an empty payload. 
+ let withdrawal_requests = post_block_withdrawal_requests_contract_call::( + &self.evm_config, + &mut db, + &initialized_cfg, + &initialized_block_env, + ) + .map_err(|err| PayloadBuilderError::Internal(err.into()))?; + + let requests = withdrawal_requests; + let requests_root = calculate_requests_root(&requests); + (Some(requests.into()), Some(requests_root)) + } else { + (None, None) + }; let header = Header { parent_hash: parent_block.hash(), @@ -287,12 +297,22 @@ where // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( &mut db, + &evm_config, &chain_spec, - block_number, &initialized_cfg, &initialized_block_env, - &attributes, - )?; + block_number, + attributes.timestamp, + attributes.parent_beacon_block_root, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + PayloadBuilderError::Internal(err.into()) + })?; // apply eip-2935 blockhashes update apply_blockhashes_update( @@ -341,7 +361,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - tx_env_with_recovered(&tx), + evm_config.tx_env(&tx), ); // Configure the environment for the block. 
@@ -425,10 +445,12 @@ where let deposit_requests = parse_deposits_from_receipts(&chain_spec, receipts.iter().flatten()) .map_err(|err| PayloadBuilderError::Internal(RethError::Execution(err.into())))?; let withdrawal_requests = post_block_withdrawal_requests_contract_call( + &evm_config, &mut db, &initialized_cfg, &initialized_block_env, - )?; + ) + .map_err(|err| PayloadBuilderError::Internal(err.into()))?; let requests = [deposit_requests, withdrawal_requests].concat(); let requests_root = calculate_requests_root(&requests); diff --git a/crates/etl/Cargo.toml b/crates/etl/Cargo.toml index e05524306d6f..1ca10d620d04 100644 --- a/crates/etl/Cargo.toml +++ b/crates/etl/Cargo.toml @@ -13,4 +13,4 @@ reth-db-api.workspace = true rayon.workspace = true [dev-dependencies] -reth-primitives.workspace = true +alloy-primitives.workspace = true diff --git a/crates/etl/src/lib.rs b/crates/etl/src/lib.rs index 0793e585334e..e439dbfe30b7 100644 --- a/crates/etl/src/lib.rs +++ b/crates/etl/src/lib.rs @@ -164,6 +164,14 @@ where } } +/// Type alias for the items stored in the heap of [`EtlIter`]. +/// +/// Each item in the heap is a tuple containing: +/// - A `Reverse` tuple of a key-value pair (`Vec, Vec`), used to maintain the heap in +/// ascending order of keys. +/// - An index (`usize`) representing the source file from which the key-value pair was read. +type HeapItem = (Reverse<(Vec, Vec)>, usize); + /// `EtlIter` is an iterator for traversing through sorted key-value pairs in a collection of ETL /// files. These files are created using the [`Collector`] and contain data where keys are encoded /// and values are compressed. @@ -174,8 +182,7 @@ where #[derive(Debug)] pub struct EtlIter<'a> { /// Heap managing the next items to be iterated. - #[allow(clippy::type_complexity)] - heap: BinaryHeap<(Reverse<(Vec, Vec)>, usize)>, + heap: BinaryHeap, /// Reference to the vector of ETL files being iterated over. 
files: &'a mut Vec, } @@ -271,7 +278,7 @@ impl EtlFile { #[cfg(test)] mod tests { - use reth_primitives::{TxHash, TxNumber}; + use alloy_primitives::{TxHash, TxNumber}; use super::*; diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index e836148101ad..ab338371984b 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-execution-errors.workspace = true reth-primitives.workspace = true revm-primitives.workspace = true @@ -20,7 +21,7 @@ reth-storage-errors.workspace = true reth-execution-types.workspace = true revm.workspace = true - +alloy-eips.workspace = true auto_impl.workspace = true futures-util.workspace = true parking_lot = { workspace = true, optional = true } @@ -29,4 +30,6 @@ parking_lot = { workspace = true, optional = true } parking_lot.workspace = true [features] +default = ["std"] +std = [] test-utils = ["dep:parking_lot"] diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index 4a23156848a3..8ec3a7024cb5 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -13,7 +13,16 @@ workspace = true [dependencies] # reth reth-consensus.workspace = true -reth-primitives.workspace = true reth-storage-errors.workspace = true reth-prune-types.workspace = true -thiserror.workspace = true + +alloy-primitives.workspace = true +alloy-eips.workspace = true +revm-primitives.workspace = true + +thiserror-no-std = { workspace = true, default-features = false } + + +[features] +default = ["std"] +std = ["thiserror-no-std/std"] \ No newline at end of file diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 3c5088028fbd..1fdee985606b 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -7,19 +7,26 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, 
feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(not(feature = "std"))] +extern crate alloc; + +use alloy_eips::BlockNumHash; +use alloy_primitives::B256; use reth_consensus::ConsensusError; -use reth_primitives::{revm_primitives::EVMError, BlockNumHash, B256}; use reth_prune_types::PruneSegmentError; use reth_storage_errors::provider::ProviderError; -use std::fmt::Display; -use thiserror::Error; +use revm_primitives::EVMError; + +#[cfg(not(feature = "std"))] +use alloc::{boxed::Box, string::String}; pub mod trie; pub use trie::{StateRootError, StorageRootError}; /// Transaction validation errors -#[derive(Error, Debug, Clone, PartialEq, Eq)] +#[derive(thiserror_no_std::Error, Debug, Clone, PartialEq, Eq)] pub enum BlockValidationError { /// EVM error with transaction hash and message #[error("EVM reported invalid transaction ({hash}): {error}")] @@ -91,6 +98,14 @@ pub enum BlockValidationError { /// The error message. message: String, }, + /// EVM error during consolidation requests contract call [EIP-7251] + /// + /// [EIP-7251]: https://eips.ethereum.org/EIPS/eip-7251 + #[error("failed to apply consolidation requests contract call: {message}")] + ConsolidationRequestsContractCall { + /// The error message. + message: String, + }, /// Error when decoding deposit requests from receipts [EIP-6110] /// /// [EIP-6110]: https://eips.ethereum.org/EIPS/eip-6110 @@ -99,7 +114,7 @@ pub enum BlockValidationError { } /// `BlockExecutor` Errors -#[derive(Error, Debug)] +#[derive(thiserror_no_std::Error, Debug)] pub enum BlockExecutionError { /// Validation error, transparently wrapping `BlockValidationError` #[error(transparent)] @@ -119,7 +134,7 @@ pub enum BlockExecutionError { /// Transaction error on commit with inner details #[error("transaction error on commit: {inner}")] CanonicalCommit { - /// The inner error message + /// The inner error message. 
inner: String, }, /// Error when appending chain on fork is not possible @@ -136,12 +151,14 @@ pub enum BlockExecutionError { #[error(transparent)] LatestBlock(#[from] ProviderError), /// Arbitrary Block Executor Errors + #[cfg(feature = "std")] #[error(transparent)] Other(Box), } impl BlockExecutionError { /// Create a new `BlockExecutionError::Other` variant. + #[cfg(feature = "std")] pub fn other(error: E) -> Self where E: std::error::Error + Send + Sync + 'static, @@ -150,7 +167,8 @@ impl BlockExecutionError { } /// Create a new [`BlockExecutionError::Other`] from a given message. - pub fn msg(msg: impl Display) -> Self { + #[cfg(feature = "std")] + pub fn msg(msg: impl std::fmt::Display) -> Self { Self::Other(msg.to_string().into()) } diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index ee511611b998..fd3533977ab2 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,7 +1,7 @@ //! Errors when computing the state root. use reth_storage_errors::db::DatabaseError; -use thiserror::Error; +use thiserror_no_std::Error; /// State root errors. 
#[derive(Error, Debug, PartialEq, Eq, Clone)] diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 5f61a1eb8885..57181537c542 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -12,15 +12,19 @@ workspace = true [dependencies] reth-primitives.workspace = true +reth-chainspec = { workspace = true, optional = true } reth-execution-errors.workspace = true reth-trie.workspace = true revm.workspace = true +serde = { workspace = true, optional = true } + [dev-dependencies] reth-primitives = { workspace = true, features = ["test-utils"] } alloy-primitives.workspace = true alloy-eips.workspace = true [features] -optimism = [] \ No newline at end of file +optimism = ["dep:reth-chainspec"] +serde = ["dep:serde", "reth-trie/serde", "revm/serde"] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index d202e3e1b746..17e4ed8ba31a 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -21,6 +21,7 @@ use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive}; /// /// A chain of blocks should not be empty. #[derive(Clone, Debug, Default, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Chain { /// All blocks in this chain. blocks: BTreeMap, @@ -93,6 +94,11 @@ impl Chain { &self.execution_outcome } + /// Get mutable execution outcome of this chain + pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { + &mut self.execution_outcome + } + /// Prepends the given state to the current state. 
pub fn prepend_state(&mut self, state: BundleState) { self.execution_outcome.prepend_state(state); @@ -460,6 +466,18 @@ pub enum ChainSplitTarget { Hash(BlockHash), } +impl From for ChainSplitTarget { + fn from(number: BlockNumber) -> Self { + Self::Number(number) + } +} + +impl From for ChainSplitTarget { + fn from(hash: BlockHash) -> Self { + Self::Hash(hash) + } +} + /// Result of a split chain. #[derive(Clone, Debug, PartialEq, Eq)] pub enum ChainSplit { @@ -488,7 +506,7 @@ pub enum ChainSplit { #[cfg(test)] mod tests { use super::*; - use reth_primitives::B256; + use reth_primitives::{Receipt, Receipts, TxType, B256}; use revm::primitives::{AccountInfo, HashMap}; #[test] @@ -603,26 +621,100 @@ mod tests { // split in two assert_eq!( - chain.clone().split(ChainSplitTarget::Hash(block1_hash)), + chain.clone().split(block1_hash.into()), ChainSplit::Split { canonical: chain_split1, pending: chain_split2 } ); // split at unknown block hash assert_eq!( - chain.clone().split(ChainSplitTarget::Hash(B256::new([100; 32]))), + chain.clone().split(B256::new([100; 32]).into()), ChainSplit::NoSplitPending(chain.clone()) ); // split at higher number - assert_eq!( - chain.clone().split(ChainSplitTarget::Number(10)), - ChainSplit::NoSplitPending(chain.clone()) - ); + assert_eq!(chain.clone().split(10u64.into()), ChainSplit::NoSplitPending(chain.clone())); // split at lower number - assert_eq!( - chain.clone().split(ChainSplitTarget::Number(0)), - ChainSplit::NoSplitPending(chain) - ); + assert_eq!(chain.clone().split(0u64.into()), ChainSplit::NoSplitPending(chain)); + } + + #[test] + fn receipts_by_block_hash() { + // Create a default SealedBlockWithSenders object + let block: SealedBlockWithSenders = SealedBlockWithSenders::default(); + + // Define block hashes for block1 and block2 + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + // Clone the default block into block1 and block2 + let mut block1 = block.clone(); + let mut block2 = 
block; + + // Set the hashes of block1 and block2 + block1.block.header.set_hash(block1_hash); + block2.block.header.set_hash(block2_hash); + + // Create a random receipt object, receipt1 + let receipt1 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + #[cfg(feature = "optimism")] + deposit_nonce: Some(18), + #[cfg(feature = "optimism")] + deposit_receipt_version: Some(34), + }; + + // Create another random receipt object, receipt2 + let receipt2 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 1325345, + logs: vec![], + success: true, + #[cfg(feature = "optimism")] + deposit_nonce: Some(18), + #[cfg(feature = "optimism")] + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = + Receipts { receipt_vec: vec![vec![Some(receipt1.clone())], vec![Some(receipt2)]] }; + + // Create an ExecutionOutcome object with the created bundle, receipts, an empty requests + // vector, and first_block set to 10 + let execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block: 10, + }; + + // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, + // including block1_hash and block2_hash, and the execution_outcome + let chain = Chain { + blocks: BTreeMap::from([(10, block1), (11, block2)]), + execution_outcome: execution_outcome.clone(), + ..Default::default() + }; + + // Assert that the proper receipt vector is returned for block1_hash + assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1])); + + // Create an ExecutionOutcome object with a single receipt vector containing receipt1 + let execution_outcome1 = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { receipt_vec: vec![vec![Some(receipt1)]] }, + requests: vec![], + first_block: 10, + }; + + // Assert that the execution outcome at the first block contains only the first 
receipt + assert_eq!(chain.execution_outcome_at_block(10), Some(execution_outcome1)); + + // Assert that the execution outcome at the tip block contains the whole execution outcome + assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome)); } } diff --git a/crates/evm/execution-types/src/bundle.rs b/crates/evm/execution-types/src/execution_outcome.rs similarity index 72% rename from crates/evm/execution-types/src/bundle.rs rename to crates/evm/execution-types/src/execution_outcome.rs index c225bf194a18..c71ed3e115fc 100644 --- a/crates/evm/execution-types/src/bundle.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,8 +1,6 @@ use reth_primitives::{ - logs_bloom, - revm::compat::{into_reth_acc, into_revm_acc}, - Account, Address, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, Requests, StorageEntry, - B256, U256, + logs_bloom, Account, Address, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, Requests, + StorageEntry, B256, U256, }; use reth_trie::HashedPostState; use revm::{ @@ -16,6 +14,7 @@ use std::collections::HashMap; /// The `ExecutionOutcome` structure aggregates the state changes over an arbitrary number of /// blocks, capturing the resulting state, receipts, and requests following the execution. #[derive(Default, Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct ExecutionOutcome { /// Bundle state with reverts. 
pub bundle: BundleState, @@ -81,8 +80,8 @@ impl ExecutionOutcome { state_init.into_iter().map(|(address, (original, present, storage))| { ( address, - original.map(into_revm_acc), - present.map(into_revm_acc), + original.map(Into::into), + present.map(Into::into), storage.into_iter().map(|(k, s)| (k.into(), s)).collect(), ) }), @@ -91,7 +90,7 @@ impl ExecutionOutcome { reverts.into_iter().map(|(address, (original, storage))| { ( address, - original.map(|i| i.map(into_revm_acc)), + original.map(|i| i.map(Into::into)), storage.into_iter().map(|entry| (entry.key.into(), entry.value)), ) }) @@ -129,7 +128,7 @@ impl ExecutionOutcome { /// Get account if account is known. pub fn account(&self, address: &Address) -> Option> { - self.bundle.account(address).map(|a| a.info.clone().map(into_reth_acc)) + self.bundle.account(address).map(|a| a.info.clone().map(Into::into)) } /// Get storage if value is known. @@ -190,7 +189,7 @@ impl ExecutionOutcome { pub fn optimism_receipts_root_slow( &self, block_number: BlockNumber, - chain_spec: &reth_primitives::ChainSpec, + chain_spec: &reth_chainspec::ChainSpec, timestamp: u64, ) -> Option { self.receipts.optimism_root_slow( @@ -216,17 +215,17 @@ impl ExecutionOutcome { &self.receipts[index] } - /// Is bundle state empty of blocks. + /// Is execution outcome empty. pub fn is_empty(&self) -> bool { self.len() == 0 } - /// Number of blocks in bundle state. + /// Number of blocks in the execution outcome. pub fn len(&self) -> usize { self.receipts.len() } - /// Return first block of the bundle + /// Return first block of the execution outcome pub const fn first_block(&self) -> BlockNumber { self.first_block } @@ -247,6 +246,8 @@ impl ExecutionOutcome { // remove receipts self.receipts.truncate(new_len); + // remove requests + self.requests.truncate(new_len); // Revert last n reverts. self.bundle.revert(rm_trx); @@ -274,6 +275,11 @@ impl ExecutionOutcome { // Truncate higher state to [at..]. 
let at_idx = higher_state.block_number_to_index(at).unwrap(); higher_state.receipts = higher_state.receipts.split_off(at_idx).into(); + // Ensure that there are enough requests to truncate. + // Sometimes we just have receipts and no requests. + if at_idx < higher_state.requests.len() { + higher_state.requests = higher_state.requests.split_off(at_idx); + } higher_state.bundle.take_n_reverts(at_idx); higher_state.first_block = at; @@ -288,6 +294,7 @@ impl ExecutionOutcome { pub fn extend(&mut self, other: Self) { self.bundle.extend(other.bundle); self.receipts.extend(other.receipts.receipt_vec); + self.requests.extend(other.requests); } /// Prepends present the state with the given `BundleState`. @@ -369,7 +376,7 @@ mod tests { }), Request::WithdrawalRequest(WithdrawalRequest { source_address: Address::from([1; 20]), - validator_public_key: FixedBytes::<48>::from([10; 48]), + validator_pubkey: FixedBytes::<48>::from([10; 48]), amount: 72, }), ])]; @@ -591,4 +598,187 @@ mod tests { // Assert that exec_res_empty_receipts is empty assert!(exec_res_empty_receipts.is_empty()); } + + #[test] + fn test_revert_to() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + #[cfg(feature = "optimism")] + deposit_nonce: Some(18), + #[cfg(feature = "optimism")] + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]], + }; + + // Define the first block number + let first_block = 123; + + // Create a DepositRequest object with specific attributes. + let request = Request::DepositRequest(DepositRequest { + pubkey: FixedBytes::<48>::from([1; 48]), + withdrawal_credentials: B256::from([0; 32]), + amount: 1111, + signature: FixedBytes::<96>::from([2; 96]), + index: 222, + }); + + // Create a vector of Requests containing the request. 
+ let requests = vec![Requests(vec![request]), Requests(vec![request])]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let mut exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Assert that the revert_to method returns true when reverting to the initial block number. + assert!(exec_res.revert_to(123)); + + // Assert that the receipts are properly cut after reverting to the initial block number. + assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] }); + + // Assert that the requests are properly cut after reverting to the initial block number. + assert_eq!(exec_res.requests, vec![Requests(vec![request])]); + + // Assert that the revert_to method returns false when attempting to revert to a block + // number greater than the initial block number. + assert!(!exec_res.revert_to(133)); + + // Assert that the revert_to method returns false when attempting to revert to a block + // number less than the initial block number. + assert!(!exec_res.revert_to(10)); + } + + #[test] + fn test_extend_execution_outcome() { + // Create a Receipt object with specific attributes. + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + #[cfg(feature = "optimism")] + deposit_nonce: Some(18), + #[cfg(feature = "optimism")] + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object containing the receipt. + let receipts = Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }; + + // Create a DepositRequest object with specific attributes. + let request = Request::DepositRequest(DepositRequest { + pubkey: FixedBytes::<48>::from([1; 48]), + withdrawal_credentials: B256::from([0; 32]), + amount: 1111, + signature: FixedBytes::<96>::from([2; 96]), + index: 222, + }); + + // Create a vector of Requests containing the request. 
+ let requests = vec![Requests(vec![request])]; + + // Define the initial block number. + let first_block = 123; + + // Create an ExecutionOutcome object. + let mut exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Extend the ExecutionOutcome object by itself. + exec_res.extend(exec_res.clone()); + + // Assert the extended ExecutionOutcome matches the expected outcome. + assert_eq!( + exec_res, + ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]] + }, + requests: vec![Requests(vec![request]), Requests(vec![request])], + first_block: 123, + } + ); + } + + #[test] + fn test_split_at_execution_outcome() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + #[cfg(feature = "optimism")] + deposit_nonce: Some(18), + #[cfg(feature = "optimism")] + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![ + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + ], + }; + + // Define the first block number + let first_block = 123; + + // Create a DepositRequest object with specific attributes. + let request = Request::DepositRequest(DepositRequest { + pubkey: FixedBytes::<48>::from([1; 48]), + withdrawal_credentials: B256::from([0; 32]), + amount: 1111, + signature: FixedBytes::<96>::from([2; 96]), + index: 222, + }); + + // Create a vector of Requests containing the request. 
+ let requests = + vec![Requests(vec![request]), Requests(vec![request]), Requests(vec![request])]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Split the ExecutionOutcome at block number 124 + let result = exec_res.clone().split_at(124); + + // Define the expected lower ExecutionOutcome after splitting + let lower_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }, + requests: vec![Requests(vec![request])], + first_block, + }; + + // Define the expected higher ExecutionOutcome after splitting + let higher_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]], + }, + requests: vec![Requests(vec![request]), Requests(vec![request])], + first_block: 124, + }; + + // Assert that the split result matches the expected lower and higher outcomes + assert_eq!(result.0, Some(lower_execution_outcome)); + assert_eq!(result.1, higher_execution_outcome); + + // Assert that splitting at the first block number returns None for the lower outcome + assert_eq!(exec_res.clone().split_at(123), (None, exec_res)); + } } diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index 7680e70852d3..0692fa57eb94 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -8,8 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod bundle; -pub use bundle::*; +mod execution_outcome; +pub use execution_outcome::*; mod chain; pub use chain::*; diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 2c8edfd29265..f6af36d2eb63 100644 --- a/crates/evm/src/either.rs +++ 
b/crates/evm/src/either.rs @@ -1,5 +1,7 @@ //! Helper type that represents one of two possible executor types +use std::fmt::Display; + use crate::execute::{ BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; @@ -18,13 +20,15 @@ where A: BlockExecutorProvider, B: BlockExecutorProvider, { - type Executor> = Either, B::Executor>; - type BatchExecutor> = + type Executor + Display>> = + Either, B::Executor>; + + type BatchExecutor + Display>> = Either, B::BatchExecutor>; fn executor(&self, db: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { match self { Self::Left(a) => Either::Left(a.executor(db)), @@ -32,13 +36,13 @@ where } } - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { match self { - Self::Left(a) => Either::Left(a.batch_executor(db, prune_modes)), - Self::Right(b) => Either::Right(b.batch_executor(db, prune_modes)), + Self::Left(a) => Either::Left(a.batch_executor(db)), + Self::Right(b) => Either::Right(b.batch_executor(db)), } } } @@ -57,7 +61,7 @@ where Output = BlockExecutionOutput, Error = BlockExecutionError, >, - DB: Database, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = BlockExecutionOutput; @@ -85,7 +89,7 @@ where Output = ExecutionOutcome, Error = BlockExecutionError, >, - DB: Database, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = ExecutionOutcome; @@ -112,6 +116,13 @@ where } } + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + match self { + Self::Left(a) => a.set_prune_modes(prune_modes), + Self::Right(b) => b.set_prune_modes(prune_modes), + } + } + fn size_hint(&self) -> Option { match self { Self::Left(a) => a.size_hint(), diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 
bf479271be06..9d3fd0a5e824 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -5,6 +5,10 @@ use reth_primitives::{BlockNumber, BlockWithSenders, Receipt, Request, U256}; use reth_prune_types::PruneModes; use revm::db::BundleState; use revm_primitives::db::Database; +use std::fmt::Display; + +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; pub use reth_execution_errors::{BlockExecutionError, BlockValidationError}; pub use reth_storage_errors::provider::ProviderError; @@ -81,6 +85,11 @@ pub trait BatchExecutor { /// This can be used to optimize state pruning during execution. fn set_tip(&mut self, tip: BlockNumber); + /// Set the prune modes. + /// + /// They are used to determine which parts of the state should be kept during execution. + fn set_prune_modes(&mut self, prune_modes: PruneModes); + /// The size hint of the batch's tracked state size. /// /// This is used to optimize DB commits depending on the size of the state. @@ -92,7 +101,7 @@ pub trait BatchExecutor { /// Contains the state changes, transaction receipts, and total gas used in the block. /// /// TODO(mattsse): combine with `ExecutionOutcome` -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct BlockExecutionOutput { /// The changed state of the block after execution. pub state: BundleState, @@ -139,7 +148,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// /// It is not expected to validate the state trie root, this must be done by the caller using /// the returned state. - type Executor>: for<'a> Executor< + type Executor + Display>>: for<'a> Executor< DB, Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, Output = BlockExecutionOutput, @@ -147,7 +156,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { >; /// An executor that can execute a batch of blocks given a database. 
- type BatchExecutor>: for<'a> BatchExecutor< + type BatchExecutor + Display>>: for<'a> BatchExecutor< DB, Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, Output = ExecutionOutcome, @@ -159,18 +168,15 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// This is used to execute a single block and get the changed state. fn executor(&self, db: DB) -> Self::Executor where - DB: Database; + DB: Database + Display>; /// Creates a new batch executor with the given database and pruning modes. /// /// Batch executor is used to execute multiple blocks in sequence and keep track of the state /// during historical sync which involves executing multiple blocks in sequence. - /// - /// The pruning modes are used to determine which parts of the state should be kept during - /// execution. - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database; + DB: Database + Display>; } #[cfg(test)] @@ -184,19 +190,19 @@ mod tests { struct TestExecutorProvider; impl BlockExecutorProvider for TestExecutorProvider { - type Executor> = TestExecutor; - type BatchExecutor> = TestExecutor; + type Executor + Display>> = TestExecutor; + type BatchExecutor + Display>> = TestExecutor; fn executor(&self, _db: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { TestExecutor(PhantomData) } - fn batch_executor(&self, _db: DB, _prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, _db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { TestExecutor(PhantomData) } @@ -231,6 +237,10 @@ mod tests { todo!() } + fn set_prune_modes(&mut self, _prune_modes: PruneModes) { + todo!() + } + fn size_hint(&self) -> Option { None } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index f74dc97c552f..445d9625f80f 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -7,23 +7,32 @@ )] 
#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] -use reth_primitives::{ - revm::env::fill_block_env, Address, ChainSpec, Header, TransactionSigned, U256, -}; +#[cfg(not(feature = "std"))] +extern crate alloc; + +use core::ops::Deref; + +use reth_chainspec::ChainSpec; +use reth_primitives::{Address, Header, TransactionSigned, TransactionSignedEcRecovered, U256}; use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv}; +use revm_primitives::{ + BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv, +}; pub mod either; pub mod execute; pub mod noop; pub mod provider; +pub mod system_calls; #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking executor pub mod test_utils; /// Trait for configuring the EVM for executing full blocks. +#[auto_impl::auto_impl(&, Arc)] pub trait ConfigureEvm: ConfigureEvmEnv { /// Associated type for the default external context that should be configured for the EVM. type DefaultExternalContext<'a>; @@ -95,29 +104,71 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This represents the set of methods used to configure the EVM's environment before block /// execution. +/// +/// Default trait method implementation is done w.r.t. L1. +#[auto_impl::auto_impl(&, Arc)] pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { + /// Returns a [`TxEnv`] from a [`TransactionSignedEcRecovered`]. + fn tx_env(&self, transaction: &TransactionSignedEcRecovered) -> TxEnv { + let mut tx_env = TxEnv::default(); + self.fill_tx_env(&mut tx_env, transaction.deref(), transaction.signer()); + tx_env + } + /// Fill transaction environment from a [`TransactionSigned`] and the given sender address. 
- fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); + + /// Fill transaction environment with a system contract call. + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ); /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header fn fill_cfg_env( + &self, cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ); + /// Fill [`BlockEnv`] field according to the chain spec and given header + fn fill_block_env(&self, block_env: &mut BlockEnv, header: &Header, after_merge: bool) { + block_env.number = U256::from(header.number); + block_env.coinbase = header.beneficiary; + block_env.timestamp = U256::from(header.timestamp); + if after_merge { + block_env.prevrandao = Some(header.mix_hash); + block_env.difficulty = U256::ZERO; + } else { + block_env.difficulty = header.difficulty; + block_env.prevrandao = None; + } + block_env.basefee = U256::from(header.base_fee_per_gas.unwrap_or_default()); + block_env.gas_limit = U256::from(header.gas_limit); + + // EIP-4844 excess blob gas of this block, introduced in Cancun + if let Some(excess_blob_gas) = header.excess_blob_gas { + block_env.set_blob_excess_gas_and_price(excess_blob_gas); + } + } + /// Convenience function to call both [`fill_cfg_env`](ConfigureEvmEnv::fill_cfg_env) and - /// [`fill_block_env`]. + /// [`ConfigureEvmEnv::fill_block_env`]. 
fn fill_cfg_and_block_env( + &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ) { - Self::fill_cfg_env(cfg, chain_spec, header, total_difficulty); + self.fill_cfg_env(cfg, chain_spec, header, total_difficulty); let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE; - fill_block_env(block_env, chain_spec, header, after_merge); + self.fill_block_env(block_env, header, after_merge); } } diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index fdee35239369..80a2b76de834 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -1,5 +1,7 @@ //! A no operation block executor implementation. +use std::fmt::Display; + use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; @@ -19,20 +21,20 @@ const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; pub struct NoopBlockExecutorProvider; impl BlockExecutorProvider for NoopBlockExecutorProvider { - type Executor> = Self; + type Executor + Display>> = Self; - type BatchExecutor> = Self; + type BatchExecutor + Display>> = Self; fn executor(&self, _: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { Self } - fn batch_executor(&self, _: DB, _: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, _: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { Self } @@ -63,6 +65,8 @@ impl BatchExecutor for NoopBlockExecutorProvider { fn set_tip(&mut self, _: BlockNumber) {} + fn set_prune_modes(&mut self, _: PruneModes) {} + fn size_hint(&self) -> Option { None } diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index abf04be8938c..2e73ff2fa985 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -6,13 +6,13 @@ use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, 
SpecId}; /// A provider type that knows chain specific information required to configure a -/// [CfgEnvWithHandlerCfg]. +/// [`CfgEnvWithHandlerCfg`]. /// /// This type is mainly used to provide required data to configure the EVM environment that is /// usually stored on disk. #[auto_impl::auto_impl(&, Arc)] pub trait EvmEnvProvider: Send + Sync { - /// Fills the [CfgEnvWithHandlerCfg] and [BlockEnv] fields with values specific to the given + /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given /// [BlockHashOrNumber]. fn fill_env_at( &self, @@ -24,7 +24,7 @@ pub trait EvmEnvProvider: Send + Sync { where EvmConfig: ConfigureEvmEnv; - /// Fills the default [CfgEnvWithHandlerCfg] and [BlockEnv] fields with values specific to the + /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the /// given [Header]. fn env_with_header( &self, @@ -40,7 +40,7 @@ pub trait EvmEnvProvider: Send + Sync { Ok((cfg, block_env)) } - /// Fills the [CfgEnvWithHandlerCfg] and [BlockEnv] fields with values specific to the given + /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given /// [Header]. fn fill_env_with_header( &self, @@ -52,21 +52,7 @@ pub trait EvmEnvProvider: Send + Sync { where EvmConfig: ConfigureEvmEnv; - /// Fills the [BlockEnv] fields with values specific to the given [BlockHashOrNumber]. - fn fill_block_env_at( - &self, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - ) -> ProviderResult<()>; - - /// Fills the [BlockEnv] fields with values specific to the given [Header]. - fn fill_block_env_with_header( - &self, - block_env: &mut BlockEnv, - header: &Header, - ) -> ProviderResult<()>; - - /// Fills the [CfgEnvWithHandlerCfg] fields with values specific to the given + /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given /// [BlockHashOrNumber]. 
fn fill_cfg_env_at( &self, @@ -77,7 +63,7 @@ pub trait EvmEnvProvider: Send + Sync { where EvmConfig: ConfigureEvmEnv; - /// Fills the [CfgEnvWithHandlerCfg] fields with values specific to the given [Header]. + /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given [Header]. fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/evm/src/system_calls.rs b/crates/evm/src/system_calls.rs new file mode 100644 index 000000000000..9d493f51795e --- /dev/null +++ b/crates/evm/src/system_calls.rs @@ -0,0 +1,402 @@ +//! System contract call functions. + +use crate::ConfigureEvm; +use alloy_eips::{ + eip4788::BEACON_ROOTS_ADDRESS, + eip7002::{WithdrawalRequest, WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS}, + eip7251::{ConsolidationRequest, CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS}, +}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{Buf, Request}; +use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; +use revm_primitives::{ + Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, FixedBytes, + ResultAndState, B256, +}; + +/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. +/// +/// This constructs a new [Evm] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the pre block contract call. +/// +/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state +/// change. 
+#[allow(clippy::too_many_arguments)] +pub fn pre_block_beacon_root_contract_call( + db: &mut DB, + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, + block_number: u64, + block_timestamp: u64, + parent_beacon_block_root: Option, +) -> Result<(), BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: std::fmt::Display, + EvmConfig: ConfigureEvm, +{ + // apply pre-block EIP-4788 contract call + let mut evm_pre_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + // initialize a block from the env, because the pre block call needs the block itself + apply_beacon_root_contract_call( + evm_config, + chain_spec, + block_timestamp, + block_number, + parent_beacon_block_root, + &mut evm_pre_block, + ) +} + +/// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, +/// [`ChainSpec`], EVM. +/// +/// If Cancun is not activated or the block is the genesis block, then this is a no-op, and no +/// state changes are made. 
+/// +/// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 +#[inline] +pub fn apply_beacon_root_contract_call( + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + block_timestamp: u64, + block_number: u64, + parent_beacon_block_root: Option, + evm: &mut Evm<'_, EXT, DB>, +) -> Result<(), BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm, +{ + if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { + return Ok(()) + } + + let parent_beacon_block_root = + parent_beacon_block_root.ok_or(BlockValidationError::MissingParentBeaconBlockRoot)?; + + // if the block number is zero (genesis block) then the parent beacon block root must + // be 0x0 and no system transaction may occur as per EIP-4788 + if block_number == 0 { + if parent_beacon_block_root != B256::ZERO { + return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero { + parent_beacon_block_root, + } + .into()) + } + return Ok(()) + } + + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // modify env for pre block call + evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip4788::SYSTEM_ADDRESS, + BEACON_ROOTS_ADDRESS, + parent_beacon_block_root.0.into(), + ); + + let mut state = match evm.transact() { + Ok(res) => res.state, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::BeaconRootContractCall { + parent_beacon_block_root: Box::new(parent_beacon_block_root), + message: e.to_string(), + } + .into()) + } + }; + + state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + + evm.context.evm.db.commit(state); + + // re-set the previous env + evm.context.evm.env = previous_env; + + Ok(()) +} + +/// Apply the [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) post block contract call. 
+/// +/// This constructs a new [Evm] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. +/// +/// This uses [`apply_withdrawal_requests_contract_call`] to ultimately calculate the +/// [requests](Request). +pub fn post_block_withdrawal_requests_contract_call( + evm_config: &EvmConfig, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, +) -> Result, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: std::fmt::Display, + EvmConfig: ConfigureEvm, +{ + // apply post-block EIP-7002 contract call + let mut evm_post_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + // initialize a block from the env, because the post block call needs the block itself + apply_withdrawal_requests_contract_call::(evm_config, &mut evm_post_block) +} + +/// Applies the post-block call to the EIP-7002 withdrawal requests contract. +/// +/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is +/// returned. Otherwise, the withdrawal requests are returned. +#[inline] +pub fn apply_withdrawal_requests_contract_call( + evm_config: &EvmConfig, + evm: &mut Evm<'_, EXT, DB>, +) -> Result, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm, +{ + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // Fill transaction environment with the EIP-7002 withdrawal requests contract message data. + // + // This requirement for the withdrawal requests contract call defined by + // [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) is: + // + // At the end of processing any execution block where `block.timestamp >= FORK_TIMESTAMP` (i.e. 
+ // after processing all transactions and after performing the block body withdrawal requests + // validations), call the contract as `SYSTEM_ADDRESS`. + evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip7002::SYSTEM_ADDRESS, + WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, + Bytes::new(), + ); + + let ResultAndState { result, mut state } = match evm.transact() { + Ok(res) => res, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution failed: {e}"), + } + .into()) + } + }; + + // cleanup the state + state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + evm.context.evm.db.commit(state); + + // re-set the previous env + evm.context.evm.env = previous_env; + + let mut data = match result { + ExecutionResult::Success { output, .. } => Ok(output.into_data()), + ExecutionResult::Revert { output, .. } => { + Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution reverted: {output}"), + }) + } + ExecutionResult::Halt { reason, .. 
} => { + Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution halted: {reason:?}"), + }) + } + }?; + + // Withdrawals are encoded as a series of withdrawal requests, each with the following + // format: + // + // +------+--------+--------+ + // | addr | pubkey | amount | + // +------+--------+--------+ + // 20 48 8 + + const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; + let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); + while data.has_remaining() { + if data.remaining() < WITHDRAWAL_REQUEST_SIZE { + return Err(BlockValidationError::WithdrawalRequestsContractCall { + message: "invalid withdrawal request length".to_string(), + } + .into()) + } + + let mut source_address = Address::ZERO; + data.copy_to_slice(source_address.as_mut_slice()); + + let mut validator_pubkey = FixedBytes::<48>::ZERO; + data.copy_to_slice(validator_pubkey.as_mut_slice()); + + let amount = data.get_u64(); + + withdrawal_requests.push(Request::WithdrawalRequest(WithdrawalRequest { + source_address, + validator_pubkey, + amount, + })); + } + + Ok(withdrawal_requests) +} + +/// Apply the [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) post block contract call. +/// +/// This constructs a new [Evm] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. +/// +/// This uses [`apply_consolidation_requests_contract_call`] to ultimately calculate the +/// [requests](Request). 
+pub fn post_block_consolidation_requests_contract_call( + evm_config: &EvmConfig, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, +) -> Result, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: std::fmt::Display, + EvmConfig: ConfigureEvm, +{ + // apply post-block EIP-7251 contract call + let mut evm_post_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + // initialize a block from the env, because the post block call needs the block itself + apply_consolidation_requests_contract_call::(evm_config, &mut evm_post_block) +} + +/// Applies the post-block call to the EIP-7251 consolidation requests contract. +/// +/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is +/// returned. Otherwise, the consolidation requests are returned. +#[inline] +pub fn apply_consolidation_requests_contract_call( + evm_config: &EvmConfig, + evm: &mut Evm<'_, EXT, DB>, +) -> Result, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm, +{ + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // Fill transaction environment with the EIP-7251 consolidation requests contract message data. + // + // This requirement for the consolidation requests contract call defined by + // [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) is: + // + // At the end of processing any execution block where block.timestamp >= FORK_TIMESTAMP (i.e. + // after processing all transactions and after performing the block body requests validations) + // clienst software MUST [..] call the contract as `SYSTEM_ADDRESS` and empty input data to + // trigger the system subroutine execute. 
+ evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip7002::SYSTEM_ADDRESS, + CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS, + Bytes::new(), + ); + + let ResultAndState { result, mut state } = match evm.transact() { + Ok(res) => res, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::ConsolidationRequestsContractCall { + message: format!("execution failed: {e}"), + } + .into()) + } + }; + + // cleanup the state + state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + evm.context.evm.db.commit(state); + + // re-set the previous env + evm.context.evm.env = previous_env; + + let mut data = match result { + ExecutionResult::Success { output, .. } => Ok(output.into_data()), + ExecutionResult::Revert { output, .. } => { + Err(BlockValidationError::ConsolidationRequestsContractCall { + message: format!("execution reverted: {output}"), + }) + } + ExecutionResult::Halt { reason, .. } => { + Err(BlockValidationError::ConsolidationRequestsContractCall { + message: format!("execution halted: {reason:?}"), + }) + } + }?; + + // Consolidations are encoded as a series of consolidation requests, each with the following + // format: + // + // +------+--------+---------------+ + // | addr | pubkey | target pubkey | + // +------+--------+---------------+ + // 20 48 48 + + const CONSOLIDATION_REQUEST_SIZE: usize = 20 + 48 + 48; + let mut consolidation_requests = Vec::with_capacity(data.len() / CONSOLIDATION_REQUEST_SIZE); + while data.has_remaining() { + if data.remaining() < CONSOLIDATION_REQUEST_SIZE { + return Err(BlockValidationError::ConsolidationRequestsContractCall { + message: "invalid consolidation request length".to_string(), + } + .into()) + } + + let mut source_address = Address::ZERO; + data.copy_to_slice(source_address.as_mut_slice()); + + let mut source_pubkey = FixedBytes::<48>::ZERO; + data.copy_to_slice(source_pubkey.as_mut_slice()); + + let mut target_pubkey = 
FixedBytes::<48>::ZERO; + data.copy_to_slice(target_pubkey.as_mut_slice()); + + consolidation_requests.push(Request::ConsolidationRequest(ConsolidationRequest { + source_address, + source_pubkey, + target_pubkey, + })); + } + + Ok(consolidation_requests) +} diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index c9627933a7fc..c3aa34a56a45 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -10,7 +10,7 @@ use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; -use std::sync::Arc; +use std::{fmt::Display, sync::Arc}; /// A [`BlockExecutorProvider`] that returns mocked execution results. #[derive(Clone, Debug, Default)] @@ -26,20 +26,20 @@ impl MockExecutorProvider { } impl BlockExecutorProvider for MockExecutorProvider { - type Executor> = Self; + type Executor + Display>> = Self; - type BatchExecutor> = Self; + type BatchExecutor + Display>> = Self; fn executor(&self, _: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { self.clone() } - fn batch_executor(&self, _: DB, _: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, _: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { self.clone() } @@ -77,6 +77,8 @@ impl BatchExecutor for MockExecutorProvider { fn set_tip(&mut self, _: BlockNumber) {} + fn set_prune_modes(&mut self, _: PruneModes) {} + fn size_hint(&self) -> Option { None } diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 4df59b3fc8c8..ec86deb6f4b6 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -19,11 +19,16 @@ reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-tasks.workspace = true reth-tracing.workspace = true 
reth-network.workspace = true reth-payload-builder.workspace = true +reth-evm.workspace = true +reth-prune-types.workspace = true +reth-revm.workspace = true +reth-stages-api.workspace = true ## async tokio.workspace = true @@ -32,3 +37,20 @@ tokio-util.workspace = true ## misc eyre.workspace = true metrics.workspace = true +serde = { workspace = true, optional = true } + +[dev-dependencies] +reth-chainspec.workspace = true +reth-evm-ethereum.workspace = true +reth-testing-utils.workspace = true +reth-blockchain-tree.workspace = true +reth-db-common.workspace = true +reth-node-api.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-db-api.workspace = true + +secp256k1.workspace = true + +[features] +default = [] +serde = ["dep:serde", "reth-provider/serde"] diff --git a/crates/exex/exex/src/backfill.rs b/crates/exex/exex/src/backfill.rs new file mode 100644 index 000000000000..36f00573437c --- /dev/null +++ b/crates/exex/exex/src/backfill.rs @@ -0,0 +1,522 @@ +use reth_evm::execute::{ + BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, +}; +use reth_node_api::FullNodeComponents; +use reth_primitives::{Block, BlockNumber, BlockWithSenders, Receipt}; +use reth_primitives_traits::format_gas_throughput; +use reth_provider::{ + BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, +}; +use reth_prune_types::PruneModes; +use reth_revm::database::StateProviderDatabase; +use reth_stages_api::ExecutionStageThresholds; +use reth_tracing::tracing::{debug, trace}; +use std::{ + ops::RangeInclusive, + time::{Duration, Instant}, +}; + +/// Factory for creating new backfill jobs. +#[derive(Debug, Clone)] +pub struct BackfillJobFactory { + executor: E, + provider: P, + prune_modes: PruneModes, + thresholds: ExecutionStageThresholds, +} + +impl BackfillJobFactory { + /// Creates a new [`BackfillJobFactory`]. 
+ pub fn new(executor: E, provider: P) -> Self { + Self { + executor, + provider, + prune_modes: PruneModes::none(), + thresholds: ExecutionStageThresholds::default(), + } + } + + /// Sets the prune modes + pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + self.prune_modes = prune_modes; + self + } + + /// Sets the thresholds + pub const fn with_thresholds(mut self, thresholds: ExecutionStageThresholds) -> Self { + self.thresholds = thresholds; + self + } +} + +impl BackfillJobFactory { + /// Creates a new backfill job for the given range. + pub fn backfill(&self, range: RangeInclusive) -> BackfillJob { + BackfillJob { + executor: self.executor.clone(), + provider: self.provider.clone(), + prune_modes: self.prune_modes.clone(), + range, + thresholds: self.thresholds.clone(), + } + } +} + +impl BackfillJobFactory<(), ()> { + /// Creates a new [`BackfillJobFactory`] from [`FullNodeComponents`]. + pub fn new_from_components( + components: Node, + ) -> BackfillJobFactory { + BackfillJobFactory::<_, _>::new( + components.block_executor().clone(), + components.provider().clone(), + ) + } +} + +/// Backfill job started for a specific range. 
+/// +/// It implements [`Iterator`] that executes blocks in batches according to the provided thresholds +/// and yields [`Chain`] +#[derive(Debug)] +pub struct BackfillJob { + executor: E, + provider: P, + prune_modes: PruneModes, + thresholds: ExecutionStageThresholds, + range: RangeInclusive, +} + +impl Iterator for BackfillJob +where + E: BlockExecutorProvider, + P: HeaderProvider + BlockReader + StateProviderFactory, +{ + type Item = Result; + + fn next(&mut self) -> Option { + if self.range.is_empty() { + return None + } + + Some(self.execute_range()) + } +} + +impl BackfillJob +where + E: BlockExecutorProvider, + P: BlockReader + HeaderProvider + StateProviderFactory, +{ + fn execute_range(&mut self) -> Result { + let mut executor = self.executor.batch_executor(StateProviderDatabase::new( + self.provider.history_by_block_number(self.range.start().saturating_sub(1))?, + )); + executor.set_prune_modes(self.prune_modes.clone()); + + let mut fetch_block_duration = Duration::default(); + let mut execution_duration = Duration::default(); + let mut cumulative_gas = 0; + let batch_start = Instant::now(); + + let mut blocks = Vec::new(); + for block_number in self.range.clone() { + // Fetch the block + let fetch_block_start = Instant::now(); + + let td = self + .provider + .header_td_by_number(block_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; + + // we need the block's transactions along with their hashes + let block = self + .provider + .sealed_block_with_senders(block_number.into(), TransactionVariant::WithHash)? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; + + fetch_block_duration += fetch_block_start.elapsed(); + + cumulative_gas += block.gas_used; + + // Configure the executor to use the current state. 
+ trace!(target: "exex::backfill", number = block_number, txs = block.body.len(), "Executing block"); + + // Execute the block + let execute_start = Instant::now(); + + // Unseal the block for execution + let (block, senders) = block.into_components(); + let (unsealed_header, hash) = block.header.split(); + let block = Block { + header: unsealed_header, + body: block.body, + ommers: block.ommers, + withdrawals: block.withdrawals, + requests: block.requests, + } + .with_senders_unchecked(senders); + + executor.execute_and_verify_one((&block, td).into())?; + execution_duration += execute_start.elapsed(); + + // TODO(alexey): report gas metrics using `block.header.gas_used` + + // Seal the block back and save it + blocks.push(block.seal(hash)); + + // Check if we should commit now + let bundle_size_hint = executor.size_hint().unwrap_or_default() as u64; + if self.thresholds.is_end_of_batch( + block_number - *self.range.start(), + bundle_size_hint, + cumulative_gas, + batch_start.elapsed(), + ) { + break + } + } + + let last_block_number = blocks.last().expect("blocks should not be empty").number; + debug!( + target: "exex::backfill", + range = ?*self.range.start()..=last_block_number, + block_fetch = ?fetch_block_duration, + execution = ?execution_duration, + throughput = format_gas_throughput(cumulative_gas, execution_duration), + "Finished executing block range" + ); + self.range = last_block_number + 1..=*self.range.end(); + + let chain = Chain::new(blocks, executor.finalize(), None); + Ok(chain) + } +} + +impl BackfillJob { + /// Converts the backfill job into a single block backfill job. + pub fn into_single_blocks(self) -> SingleBlockBackfillJob { + self.into() + } +} + +impl From> for SingleBlockBackfillJob { + fn from(value: BackfillJob) -> Self { + Self { executor: value.executor, provider: value.provider, range: value.range } + } +} + +/// Single block Backfill job started for a specific range. 
+/// +/// It implements [`Iterator`] which executes a block each time the +/// iterator is advanced and yields ([`BlockWithSenders`], [`BlockExecutionOutput`]) +#[derive(Debug)] +pub struct SingleBlockBackfillJob { + executor: E, + provider: P, + range: RangeInclusive, +} + +impl Iterator for SingleBlockBackfillJob +where + E: BlockExecutorProvider, + P: HeaderProvider + BlockReader + StateProviderFactory, +{ + type Item = Result<(BlockWithSenders, BlockExecutionOutput), BlockExecutionError>; + + fn next(&mut self) -> Option { + self.range.next().map(|block_number| self.execute_block(block_number)) + } +} + +impl SingleBlockBackfillJob +where + E: BlockExecutorProvider, + P: HeaderProvider + BlockReader + StateProviderFactory, +{ + fn execute_block( + &self, + block_number: u64, + ) -> Result<(BlockWithSenders, BlockExecutionOutput), BlockExecutionError> { + let td = self + .provider + .header_td_by_number(block_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; + + // Fetch the block with senders for execution. + let block_with_senders = self + .provider + .block_with_senders(block_number.into(), TransactionVariant::WithHash)? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; + + // Configure the executor to use the previous block's state. 
+ let executor = self.executor.executor(StateProviderDatabase::new( + self.provider.history_by_block_number(block_number.saturating_sub(1))?, + )); + + trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body.len(), "Executing block"); + + let block_execution_output = executor.execute((&block_with_senders, td).into())?; + + Ok((block_with_senders, block_execution_output)) + } +} + +#[cfg(test)] +mod tests { + use crate::BackfillJobFactory; + use eyre::OptionExt; + use reth_blockchain_tree::noop::NoopBlockchainTree; + use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET}; + use reth_db_common::init::init_genesis; + use reth_evm::execute::{ + BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, + }; + use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_primitives::{ + b256, constants::ETH_TO_WEI, public_key_to_address, Address, Block, BlockWithSenders, + Genesis, GenesisAccount, Header, Receipt, Requests, SealedBlockWithSenders, Transaction, + TxEip2930, TxKind, U256, + }; + use reth_provider::{ + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, + BlockWriter, ExecutionOutcome, LatestStateProviderRef, ProviderFactory, + }; + use reth_revm::database::StateProviderDatabase; + use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; + use secp256k1::Keypair; + use std::sync::Arc; + + fn to_execution_outcome( + block_number: u64, + block_execution_output: &BlockExecutionOutput, + ) -> ExecutionOutcome { + ExecutionOutcome { + bundle: block_execution_output.state.clone(), + receipts: block_execution_output.receipts.clone().into(), + first_block: block_number, + requests: vec![Requests(block_execution_output.requests.clone())], + } + } + + fn chain_spec(address: Address) -> Arc { + // Create a chain spec with a genesis state that contains the + // provided sender + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + 
.genesis(Genesis { + alloc: [( + address, + GenesisAccount { balance: U256::from(ETH_TO_WEI), ..Default::default() }, + )] + .into(), + ..MAINNET.genesis.clone() + }) + .paris_activated() + .build(), + ) + } + + fn execute_block_and_commit_to_database( + provider_factory: &ProviderFactory, + chain_spec: Arc, + block: &BlockWithSenders, + ) -> eyre::Result> + where + DB: reth_db_api::database::Database, + { + let provider = provider_factory.provider()?; + + // Execute the block to produce a block execution output + let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) + .executor(StateProviderDatabase::new(LatestStateProviderRef::new( + provider.tx_ref(), + provider.static_file_provider().clone(), + ))) + .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; + block_execution_output.state.reverts.sort(); + + // Convert the block execution output to an execution outcome for committing to the database + let execution_outcome = to_execution_outcome(block.number, &block_execution_output); + + // Commit the block's execution outcome to the database + let provider_rw = provider_factory.provider_rw()?; + let block = block.clone().seal_slow(); + provider_rw.append_blocks_with_state( + vec![block], + execution_outcome, + Default::default(), + Default::default(), + )?; + provider_rw.commit()?; + + Ok(block_execution_output) + } + + fn blocks_and_execution_outputs( + provider_factory: ProviderFactory, + chain_spec: Arc, + key_pair: Keypair, + ) -> eyre::Result)>> + where + DB: reth_db_api::database::Database, + { + // First block has a transaction that transfers some ETH to zero address + let block1 = Block { + header: Header { + parent_hash: chain_spec.genesis_hash(), + receipts_root: b256!( + "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" + ), + difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), + number: 1, + gas_limit: 21000, + gas_used: 21000, + ..Default::default() + }, + body: 
vec![sign_tx_with_key_pair( + key_pair, + Transaction::Eip2930(TxEip2930 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: 21000, + gas_price: 1_500_000_000, + to: TxKind::Call(Address::ZERO), + value: U256::from(0.1 * ETH_TO_WEI as f64), + ..Default::default() + }), + )], + ..Default::default() + } + .with_recovered_senders() + .ok_or_eyre("failed to recover senders")?; + + // Second block resends the same transaction with increased nonce + let block2 = Block { + header: Header { + parent_hash: block1.header.hash_slow(), + receipts_root: b256!( + "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" + ), + difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), + number: 2, + gas_limit: 21000, + gas_used: 21000, + ..Default::default() + }, + body: vec![sign_tx_with_key_pair( + key_pair, + Transaction::Eip2930(TxEip2930 { + chain_id: chain_spec.chain.id(), + nonce: 1, + gas_limit: 21000, + gas_price: 1_500_000_000, + to: TxKind::Call(Address::ZERO), + value: U256::from(0.1 * ETH_TO_WEI as f64), + ..Default::default() + }), + )], + ..Default::default() + } + .with_recovered_senders() + .ok_or_eyre("failed to recover senders")?; + + let block_output1 = + execute_block_and_commit_to_database(&provider_factory, chain_spec.clone(), &block1)?; + let block_output2 = + execute_block_and_commit_to_database(&provider_factory, chain_spec, &block2)?; + + let block1 = block1.seal_slow(); + let block2 = block2.seal_slow(); + + Ok(vec![(block1, block_output1), (block2, block_output2)]) + } + + #[test] + fn test_backfill() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + // Create a key pair for the sender + let key_pair = Keypair::new_global(&mut generators::rng()); + let address = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec(address); + + let executor = EthExecutorProvider::ethereum(chain_spec.clone()); + let provider_factory = 
create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new( + provider_factory.clone(), + Arc::new(NoopBlockchainTree::default()), + )?; + + let blocks_and_execution_outputs = + blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; + let (block, block_execution_output) = blocks_and_execution_outputs.first().unwrap(); + let execution_outcome = to_execution_outcome(block.number, block_execution_output); + + // Backfill the first block + let factory = BackfillJobFactory::new(executor, blockchain_db); + let job = factory.backfill(1..=1); + let chains = job.collect::, _>>()?; + + // Assert that the backfill job produced the same chain as we got before when we were + // executing only the first block + assert_eq!(chains.len(), 1); + let mut chain = chains.into_iter().next().unwrap(); + chain.execution_outcome_mut().bundle.reverts.sort(); + assert_eq!(chain.blocks(), &[(1, block.clone())].into()); + assert_eq!(chain.execution_outcome(), &execution_outcome); + + Ok(()) + } + + #[test] + fn test_single_block_backfill() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + // Create a key pair for the sender + let key_pair = Keypair::new_global(&mut generators::rng()); + let address = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec(address); + + let executor = EthExecutorProvider::ethereum(chain_spec.clone()); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new( + provider_factory.clone(), + Arc::new(NoopBlockchainTree::default()), + )?; + + let blocks_and_execution_outcomes = + blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; + + // Backfill the first block + let factory = BackfillJobFactory::new(executor, blockchain_db); + let job = factory.backfill(1..=1); + let single_job = 
job.into_single_blocks(); + let block_execution_it = single_job.into_iter(); + + // Assert that the backfill job only produces a single block + let blocks_and_outcomes = block_execution_it.collect::>(); + assert_eq!(blocks_and_outcomes.len(), 1); + + // Assert that the backfill job single block iterator produces the expected output for each + // block + for (i, res) in blocks_and_outcomes.into_iter().enumerate() { + let (block, mut execution_output) = res?; + execution_output.state.reverts.sort(); + + let sealed_block_with_senders = blocks_and_execution_outcomes[i].0.clone(); + let expected_block = sealed_block_with_senders.unseal(); + let expected_output = &blocks_and_execution_outcomes[i].1; + + assert_eq!(block, expected_block); + assert_eq!(&execution_output, expected_output); + } + + Ok(()) + } +} diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 6edb8f558324..86939e3ba767 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -1,13 +1,11 @@ -use std::fmt::Debug; - -use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; +use crate::{ExExEvent, ExExNotification}; +use reth_node_api::FullNodeComponents; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; use reth_tasks::TaskExecutor; +use std::fmt::Debug; use tokio::sync::mpsc::{Receiver, UnboundedSender}; -use crate::{ExExEvent, ExExNotification}; - /// Captures the context that an `ExEx` has access to. pub struct ExExContext { /// The current head of the blockchain at launch. 
@@ -49,46 +47,39 @@ impl Debug for ExExContext { } } -impl NodeTypes for ExExContext { - type Primitives = Node::Primitives; - type Engine = Node::Engine; -} - -impl FullNodeTypes for ExExContext { - type DB = Node::DB; - type Provider = Node::Provider; -} - -impl FullNodeComponents for ExExContext { - type Pool = Node::Pool; - type Evm = Node::Evm; - type Executor = Node::Executor; - - fn pool(&self) -> &Self::Pool { +impl ExExContext { + /// Returns the transaction pool of the node. + pub fn pool(&self) -> &Node::Pool { self.components.pool() } - fn evm_config(&self) -> &Self::Evm { + /// Returns the node's evm config. + pub fn evm_config(&self) -> &Node::Evm { self.components.evm_config() } - fn block_executor(&self) -> &Self::Executor { + /// Returns the node's executor type. + pub fn block_executor(&self) -> &Node::Executor { self.components.block_executor() } - fn provider(&self) -> &Self::Provider { + /// Returns the provider of the node. + pub fn provider(&self) -> &Node::Provider { self.components.provider() } - fn network(&self) -> &reth_network::NetworkHandle { + /// Returns the handle to the network + pub fn network(&self) -> &reth_network::NetworkHandle { self.components.network() } - fn payload_builder(&self) -> &reth_payload_builder::PayloadBuilderHandle { + /// Returns the handle to the payload builder service. + pub fn payload_builder(&self) -> &reth_payload_builder::PayloadBuilderHandle { self.components.payload_builder() } - fn task_executor(&self) -> &TaskExecutor { + /// Returns the task executor. 
+ pub fn task_executor(&self) -> &TaskExecutor { self.components.task_executor() } } diff --git a/crates/exex/exex/src/lib.rs b/crates/exex/exex/src/lib.rs index a7661d855f41..5f859accca42 100644 --- a/crates/exex/exex/src/lib.rs +++ b/crates/exex/exex/src/lib.rs @@ -34,6 +34,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] +mod backfill; +pub use backfill::*; + mod context; pub use context::*; diff --git a/crates/exex/exex/src/notification.rs b/crates/exex/exex/src/notification.rs index 9f1beec414d3..390d9dc665a7 100644 --- a/crates/exex/exex/src/notification.rs +++ b/crates/exex/exex/src/notification.rs @@ -4,6 +4,7 @@ use reth_provider::{CanonStateNotification, Chain}; /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. 
ChainCommitted { diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index 42412db8d9b9..b5b62471b6f7 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -12,20 +12,23 @@ workspace = true [dependencies] ## reth +reth-chainspec.workspace = true reth-blockchain-tree.workspace = true reth-config.workspace = true +reth-consensus = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-db-common.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } +reth-execution-types.workspace = true reth-exex.workspace = true reth-network.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-node-builder.workspace = true +reth-node-builder = { workspace = true, features = ["test-utils"] } reth-node-ethereum.workspace = true reth-payload-builder.workspace = true reth-primitives.workspace = true -reth-provider.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 2c7b57172825..cba6e01246e6 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -10,15 +10,19 @@ use futures_util::FutureExt; use reth_blockchain_tree::noop::NoopBlockchainTree; +use reth_chainspec::{ChainSpec, MAINNET}; +use reth_consensus::test_utils::TestConsensus; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_evm::test_utils::MockExecutorProvider; +use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_network::{config::SecretKey, NetworkConfigBuilder, NetworkManager}; use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; use reth_node_builder::{ components::{ - Components, 
ComponentsBuilder, ExecutorBuilder, NodeComponentsBuilder, PoolBuilder, + Components, ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NodeComponentsBuilder, + PoolBuilder, }, BuilderContext, Node, NodeAdapter, RethFullAdapter, }; @@ -28,10 +32,10 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{ChainSpec, Head, SealedBlockWithSenders, MAINNET}; +use reth_primitives::{Head, SealedBlockWithSenders}; use reth_provider::{ providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, - BlockReader, Chain, ProviderFactory, + BlockReader, ProviderFactory, }; use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; @@ -83,6 +87,22 @@ where } } +/// A test [`ConsensusBuilder`] that builds a [`TestConsensus`]. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct TestConsensusBuilder; + +impl ConsensusBuilder for TestConsensusBuilder +where + Node: FullNodeTypes, +{ + type Consensus = Arc; + + async fn build_consensus(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(Arc::new(TestConsensus::default())) + } +} + /// A test [`Node`]. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -103,6 +123,7 @@ where EthereumPayloadBuilder, EthereumNetworkBuilder, TestExecutorBuilder, + TestConsensusBuilder, >; fn components_builder(self) -> Self::ComponentsBuilder { @@ -112,16 +133,22 @@ where .payload(EthereumPayloadBuilder::default()) .network(EthereumNetworkBuilder::default()) .executor(TestExecutorBuilder::default()) + .consensus(TestConsensusBuilder::default()) } } -type TmpDB = Arc>; -type Adapter = NodeAdapter< +/// A shared [`TempDatabase`] used for testing +pub type TmpDB = Arc>; +/// The [`NodeAdapter`] for the [`TestExExContext`]. 
Contains type necessary to +/// boot the testing environment +pub type Adapter = NodeAdapter< RethFullAdapter, <>>>::ComponentsBuilder as NodeComponentsBuilder< RethFullAdapter, >>::Components, >; +/// An [`ExExContext`] using the [`Adapter`] type. +pub type TestExExContext = ExExContext; /// A helper type for testing Execution Extensions. #[derive(Debug)] @@ -134,6 +161,8 @@ pub struct TestExExHandle { pub events_rx: UnboundedReceiver, /// Channel for sending notifications to the Execution Extension pub notifications_tx: Sender, + /// Node task manager + pub tasks: TaskManager, } impl TestExExHandle { @@ -198,6 +227,7 @@ pub async fn test_exex_context_with_chain_spec( let transaction_pool = testing_pool(); let evm_config = EthEvmConfig::default(); let executor = MockExecutorProvider::default(); + let consensus = Arc::new(TestConsensus::default()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); let genesis_hash = init_genesis(provider_factory.clone())?; @@ -217,7 +247,14 @@ pub async fn test_exex_context_with_chain_spec( let task_executor = tasks.executor(); let components = NodeAdapter::, _> { - components: Components { transaction_pool, evm_config, executor, network, payload_builder }, + components: Components { + transaction_pool, + evm_config, + executor, + consensus, + network, + payload_builder, + }, task_executor, provider, }; @@ -249,7 +286,7 @@ pub async fn test_exex_context_with_chain_spec( components, }; - Ok((ctx, TestExExHandle { genesis, provider_factory, events_rx, notifications_tx })) + Ok((ctx, TestExExHandle { genesis, provider_factory, events_rx, notifications_tx, tasks })) } /// Creates a new [`ExExContext`] with (mainnet)[`MAINNET`] chain spec. diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 8797376da74b..e03b63342f72 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -12,4 +12,4 @@ description = "Commonly used types for exex usage in reth." 
workspace = true [dependencies] -alloy-primitives.workspace = true \ No newline at end of file +alloy-primitives.workspace = true diff --git a/crates/net/common/Cargo.toml b/crates/net/banlist/Cargo.toml similarity index 57% rename from crates/net/common/Cargo.toml rename to crates/net/banlist/Cargo.toml index c477725b559c..a9fb9fcda609 100644 --- a/crates/net/common/Cargo.toml +++ b/crates/net/banlist/Cargo.toml @@ -1,19 +1,16 @@ [package] -name = "reth-net-common" +name = "reth-net-banlist" version.workspace = true edition.workspace = true rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -description = "Types shared across network code" +description = "Banlist for peers and IPs" [lints] workspace = true [dependencies] # ethereum -alloy-primitives.workspace = true - -# async -tokio = { workspace = true, features = ["full"] } +alloy-primitives.workspace = true \ No newline at end of file diff --git a/crates/net/common/src/ban_list.rs b/crates/net/banlist/src/lib.rs similarity index 94% rename from crates/net/common/src/ban_list.rs rename to crates/net/banlist/src/lib.rs index e547d4c399e5..bc7d3471952b 100644 --- a/crates/net/common/src/ban_list.rs +++ b/crates/net/banlist/src/lib.rs @@ -1,5 +1,13 @@ //! Support for banning peers. 
+#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + type PeerId = alloy_primitives::B512; use std::{collections::HashMap, net::IpAddr, time::Instant}; diff --git a/crates/net/common/src/stream.rs b/crates/net/common/src/stream.rs deleted file mode 100644 index 4cf6f12bb1de..000000000000 --- a/crates/net/common/src/stream.rs +++ /dev/null @@ -1,13 +0,0 @@ -use std::net::SocketAddr; -use tokio::net::TcpStream; -/// This trait is for instrumenting a `TCPStream` with a socket addr -pub trait HasRemoteAddr { - /// Maybe returns a [`SocketAddr`] - fn remote_addr(&self) -> Option; -} - -impl HasRemoteAddr for TcpStream { - fn remote_addr(&self) -> Option { - self.peer_addr().ok() - } -} diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index fa6a396cbb04..2121b904c7fb 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -13,12 +13,13 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true -reth-net-common.workspace = true +reth-net-banlist.workspace = true +reth-ethereum-forks.workspace = true reth-net-nat.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } # ethereum +alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true secp256k1 = { workspace = true, features = [ @@ -28,6 +29,7 @@ secp256k1 = { workspace = true, features = [ "serde", ] } enr.workspace = true + # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true @@ -42,6 +44,7 @@ generic-array.workspace = true serde = { workspace = true, optional = true } [dev-dependencies] 
+reth-chainspec.workspace = true assert_matches.workspace = true rand.workspace = true tokio = { workspace = true, features = ["macros"] } @@ -49,5 +52,5 @@ reth-tracing.workspace = true [features] default = ["serde"] -test-utils = ["dep:rand"] serde = ["dep:serde"] +test-utils = ["dep:rand"] diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index 43030e70a35a..4fae31f585ae 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -3,10 +3,11 @@ //! This basis of this file has been taken from the discv5 codebase: //! +use alloy_primitives::bytes::Bytes; use alloy_rlp::Encodable; -use reth_net_common::ban_list::BanList; +use reth_net_banlist::BanList; use reth_net_nat::{NatResolver, ResolveNatInterval}; -use reth_primitives::{bytes::Bytes, NodeRecord}; +use reth_network_peers::NodeRecord; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 49ba1da0396a..d6717f834f47 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -28,6 +28,7 @@ use crate::{ error::{DecodePacketError, Discv4Error}, proto::{FindNode, Message, Neighbours, Packet, Ping, Pong}, }; +use alloy_primitives::{bytes::Bytes, hex, B256}; use discv5::{ kbucket, kbucket::{ @@ -39,8 +40,8 @@ use discv5::{ use enr::Enr; use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse}; +use reth_ethereum_forks::ForkId; use reth_network_peers::{pk2id, PeerId}; -use reth_primitives::{bytes::Bytes, hex, ForkId, B256}; use secp256k1::SecretKey; use std::{ cell::RefCell, @@ -76,7 +77,7 @@ use node::{kad_key, NodeKey}; mod table; // reexport NodeRecord primitive -pub use reth_primitives::NodeRecord; +pub use reth_network_peers::NodeRecord; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; @@ -214,8 +215,7 @@ impl Discv4 { /// # use std::io; /// use rand::thread_rng; /// use reth_discv4::{Discv4, Discv4Config}; - /// use 
reth_network_peers::{pk2id, PeerId}; - /// use reth_primitives::NodeRecord; + /// use reth_network_peers::{pk2id, NodeRecord, PeerId}; /// use secp256k1::SECP256K1; /// use std::{net::SocketAddr, str::FromStr}; /// # async fn t() -> io::Result<()> { @@ -1536,7 +1536,7 @@ impl Discv4Service { /// - timestamp is expired (lower than current local UNIX timestamp) fn ensure_not_expired(&self, timestamp: u64) -> Result<(), ()> { // ensure the timestamp is a valid UNIX timestamp - let _ = i64::try_from(timestamp).map_err(|_| ())?; + let _ = i64::try_from(timestamp).map_err(drop)?; let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); if self.config.enforce_expiration_timestamps && timestamp < now { @@ -2284,9 +2284,11 @@ pub enum DiscoveryUpdate { mod tests { use super::*; use crate::test_utils::{create_discv4, create_discv4_with_config, rng_endpoint, rng_record}; + use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable}; use rand::{thread_rng, Rng}; - use reth_primitives::{hex, mainnet_nodes, EnrForkIdEntry, ForkHash}; + use reth_ethereum_forks::{EnrForkIdEntry, ForkHash}; + use reth_network_peers::mainnet_nodes; use std::future::poll_fn; #[tokio::test] diff --git a/crates/net/discv4/src/node.rs b/crates/net/discv4/src/node.rs index 0a8f436c3f0e..242c38832286 100644 --- a/crates/net/discv4/src/node.rs +++ b/crates/net/discv4/src/node.rs @@ -1,6 +1,6 @@ +use alloy_primitives::keccak256; use generic_array::GenericArray; -use reth_network_peers::PeerId; -use reth_primitives::{keccak256, NodeRecord}; +use reth_network_peers::{NodeRecord, PeerId}; /// The key type for the table. #[derive(Debug, Copy, Clone, Eq, PartialEq)] diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 9ef4430d8819..1d257bebb3a3 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,13 +1,14 @@ //! Discovery v4 protocol implementation. 
use crate::{error::DecodePacketError, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; -use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; -use enr::Enr; -use reth_network_peers::{pk2id, PeerId}; -use reth_primitives::{ +use alloy_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, - keccak256, EnrForkIdEntry, ForkId, NodeRecord, B256, + keccak256, B256, }; +use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; +use enr::Enr; +use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; +use reth_network_peers::{pk2id, NodeRecord, PeerId}; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, SecretKey, SECP256K1, @@ -541,10 +542,11 @@ mod tests { test_utils::{rng_endpoint, rng_ipv4_record, rng_ipv6_record, rng_message}, DEFAULT_DISCOVERY_PORT, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; + use alloy_primitives::hex; use assert_matches::assert_matches; use enr::EnrPublicKey; use rand::{thread_rng, Rng, RngCore}; - use reth_primitives::{hex, ForkHash}; + use reth_ethereum_forks::ForkHash; #[test] fn test_endpoint_ipv_v4() { diff --git a/crates/net/discv4/src/test_utils.rs b/crates/net/discv4/src/test_utils.rs index d470c318a406..37d8c60db7f5 100644 --- a/crates/net/discv4/src/test_utils.rs +++ b/crates/net/discv4/src/test_utils.rs @@ -5,9 +5,10 @@ use crate::{ receive_loop, send_loop, Discv4, Discv4Config, Discv4Service, EgressSender, IngressEvent, IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; +use alloy_primitives::{hex, B256}; use rand::{thread_rng, Rng, RngCore}; -use reth_network_peers::pk2id; -use reth_primitives::{hex, ForkHash, ForkId, NodeRecord, B256}; +use reth_ethereum_forks::{ForkHash, ForkId}; +use reth_network_peers::{pk2id, NodeRecord}; use secp256k1::{SecretKey, SECP256K1}; use std::{ collections::{HashMap, HashSet}, diff --git a/crates/net/discv5/Cargo.toml b/crates/net/discv5/Cargo.toml index e4ec56736e2d..e92618466ff3 100644 --- a/crates/net/discv5/Cargo.toml +++ 
b/crates/net/discv5/Cargo.toml @@ -13,11 +13,13 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-chainspec.workspace = true +reth-ethereum-forks.workspace = true reth-metrics.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } # ethereum +alloy-primitives.workspace = true alloy-rlp.workspace = true discv5 = { workspace = true, features = ["libp2p"] } enr.workspace = true diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 962882a30b31..669e7d04fe04 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -6,10 +6,12 @@ use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, }; +use alloy_primitives::Bytes; use derive_more::Display; use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; -use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord}; +use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; +use reth_network_peers::NodeRecord; use tracing::warn; use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; @@ -129,7 +131,7 @@ impl ConfigBuilder { } /// Adds boot nodes in the form a list of [`NodeRecord`]s, parsed enodes. - pub fn add_unsigned_boot_nodes(mut self, enodes: impl Iterator) -> Self { + pub fn add_unsigned_boot_nodes(mut self, enodes: impl IntoIterator) -> Self { for node in enodes { if let Ok(node) = BootNode::from_unsigned(node) { self.bootstrap_nodes.insert(node); @@ -188,7 +190,7 @@ impl ConfigBuilder { self } - /// Sets the the number of times at which to run boost lookup queries to bootstrap the node. + /// Sets the number of times at which to run boost lookup queries to bootstrap the node. 
pub const fn bootstrap_lookup_countdown(mut self, counts: u64) -> Self { self.bootstrap_lookup_countdown = Some(counts); self @@ -447,7 +449,7 @@ impl BootNode { mod test { use std::net::SocketAddrV4; - use reth_primitives::hex; + use alloy_primitives::hex; use super::*; diff --git a/crates/net/discv5/src/enr.rs b/crates/net/discv5/src/enr.rs index 0a5d125752e6..4c06587741c5 100644 --- a/crates/net/discv5/src/enr.rs +++ b/crates/net/discv5/src/enr.rs @@ -58,7 +58,8 @@ mod tests { use super::*; use alloy_rlp::Encodable; use discv5::enr::{CombinedKey, EnrKey}; - use reth_primitives::{Hardfork, NodeRecord, MAINNET}; + use reth_chainspec::{EthereumHardfork, MAINNET}; + use reth_network_peers::NodeRecord; #[test] fn discv5_discv4_id_conversion() { @@ -83,7 +84,7 @@ mod tests { let key = CombinedKey::generate_secp256k1(); let mut buf = Vec::new(); - let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier); + let fork_id = MAINNET.hardfork_fork_id(EthereumHardfork::Frontier); fork_id.unwrap().encode(&mut buf); let enr = Enr::builder() diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs index 27763146481c..73b05ee81b2a 100644 --- a/crates/net/discv5/src/error.rs +++ b/crates/net/discv5/src/error.rs @@ -17,7 +17,7 @@ pub enum Error { /// Missing key used to identify rlpx network. #[error("fork missing on enr, key missing")] ForkMissing(&'static [u8]), - /// Failed to decode [`ForkId`](reth_primitives::ForkId) rlp value. + /// Failed to decode [`ForkId`](reth_ethereum_forks::ForkId) rlp value. #[error("failed to decode fork id, 'eth': {0:?}")] ForkIdDecodeError(#[from] alloy_rlp::Error), /// Peer is unreachable over discovery. 
diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index c99e3cc97f17..7a5da9d28f9e 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -17,13 +17,14 @@ use std::{ }; use ::enr::Enr; +use alloy_primitives::bytes::Bytes; use discv5::ListenConfig; use enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper}; use futures::future::join_all; use itertools::Itertools; use rand::{Rng, RngCore}; -use reth_network_peers::PeerId; -use reth_primitives::{bytes::Bytes, EnrForkIdEntry, ForkId, NodeRecord}; +use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; +use reth_network_peers::{NodeRecord, PeerId}; use secp256k1::SecretKey; use tokio::{sync::mpsc, task}; use tracing::{debug, error, trace}; @@ -66,7 +67,7 @@ pub const DEFAULT_MIN_TARGET_KBUCKET_INDEX: usize = 0; pub struct Discv5 { /// sigp/discv5 node. discv5: Arc, - /// [`IpMode`] of the the `RLPx` network. + /// [`IpMode`] of the `RLPx` network. rlpx_ip_mode: IpMode, /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'. fork_key: Option<&'static [u8]>, @@ -652,7 +653,7 @@ pub async fn lookup( mod test { use super::*; use ::enr::{CombinedKey, EnrKey}; - use reth_primitives::MAINNET; + use reth_chainspec::MAINNET; use secp256k1::rand::thread_rng; use tracing::trace; @@ -777,11 +778,11 @@ mod test { #[allow(unused)] #[allow(clippy::assign_op_pattern)] mod sigp { + use alloy_primitives::U256; use enr::{ k256::sha2::digest::generic_array::{typenum::U32, GenericArray}, NodeId, }; - use reth_primitives::U256; /// A `Key` is a cryptographic hash, identifying both the nodes participating in /// the Kademlia DHT, as well as records stored in the DHT. diff --git a/crates/net/discv5/src/network_stack_id.rs b/crates/net/discv5/src/network_stack_id.rs index be6814ad68b9..36369698a421 100644 --- a/crates/net/discv5/src/network_stack_id.rs +++ b/crates/net/discv5/src/network_stack_id.rs @@ -1,7 +1,7 @@ -//! Keys of ENR [`ForkId`](reth_primitives::ForkId) kv-pair. 
Identifies which network stack a node -//! belongs to. +//! Keys of ENR [`ForkId`](reth_ethereum_forks::ForkId) kv-pair. Identifies which network stack a +//! node belongs to. -use reth_primitives::ChainSpec; +use reth_chainspec::ChainSpec; /// Identifies which Ethereum network stack a node belongs to, on the discovery network. #[derive(Debug)] diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 7859383014d0..2af72afcef65 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -13,11 +13,12 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true -reth-net-common.workspace = true +reth-ethereum-forks.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } +reth-tokio-util = { workspace = true, features = ["time"] } # ethereum +alloy-primitives.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } enr.workspace = true @@ -30,20 +31,21 @@ trust-dns-resolver = "0.23" # misc data-encoding = "2" -linked_hash_set = "0.1" +linked_hash_set.workspace = true schnellru.workspace = true thiserror.workspace = true tracing.workspace = true parking_lot.workspace = true serde = { workspace = true, optional = true } -serde_with = { version = "3.3.0", optional = true } +serde_with = { workspace = true, optional = true } [dev-dependencies] +reth-chainspec.workspace = true alloy-rlp.workspace = true +alloy-chains.workspace = true tokio = { workspace = true, features = ["sync", "rt", "rt-multi-thread"] } reth-tracing.workspace = true rand.workspace = true [features] -default = ["serde"] serde = ["dep:serde", "dep:serde_with"] diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index e10d3824f89d..80a360b88618 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -22,8 +22,8 @@ use crate::{ pub use config::DnsDiscoveryConfig; use enr::Enr; use error::ParseDnsEntryError; -use reth_network_peers::pk2id; -use 
reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord}; +use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; +use reth_network_peers::{pk2id, NodeRecord}; use schnellru::{ByLength, LruMap}; use secp256k1::SecretKey; use std::{ @@ -411,9 +411,11 @@ fn convert_enr_node_record(enr: &Enr) -> Option mod tests { use super::*; use crate::tree::TreeRootEntry; + use alloy_chains::Chain; use alloy_rlp::{Decodable, Encodable}; use enr::EnrKey; - use reth_primitives::{Chain, ForkHash, Hardfork, MAINNET}; + use reth_chainspec::MAINNET; + use reth_ethereum_forks::{EthereumHardfork, ForkHash}; use secp256k1::rand::thread_rng; use std::{future::poll_fn, net::Ipv4Addr}; @@ -511,7 +513,7 @@ mod tests { resolver.insert(link.domain.clone(), root.to_string()); let mut builder = Enr::builder(); - let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier).unwrap(); + let fork_id = MAINNET.hardfork_fork_id(EthereumHardfork::Frontier).unwrap(); builder .ip4(Ipv4Addr::LOCALHOST) .udp4(30303) diff --git a/crates/net/dns/src/query.rs b/crates/net/dns/src/query.rs index c8831cbbbc1b..bd24ff72c3ae 100644 --- a/crates/net/dns/src/query.rs +++ b/crates/net/dns/src/query.rs @@ -7,7 +7,7 @@ use crate::{ tree::{DnsEntry, LinkEntry, TreeRootEntry}, }; use enr::EnrKeyUnambiguous; -use reth_net_common::ratelimit::{Rate, RateLimit}; +use reth_tokio_util::ratelimit::{Rate, RateLimit}; use std::{ collections::VecDeque, future::Future, diff --git a/crates/net/dns/src/tree.rs b/crates/net/dns/src/tree.rs index bf8dd43520b2..16f66af5252f 100644 --- a/crates/net/dns/src/tree.rs +++ b/crates/net/dns/src/tree.rs @@ -21,9 +21,9 @@ use crate::error::{ ParseDnsEntryError::{FieldNotFound, UnknownEntry}, ParseEntryResult, }; +use alloy_primitives::{hex, Bytes}; use data_encoding::{BASE32_NOPAD, BASE64URL_NOPAD}; use enr::{Enr, EnrKey, EnrKeyUnambiguous, EnrPublicKey, Error as EnrError}; -use reth_primitives::{hex, Bytes}; use secp256k1::SecretKey; #[cfg(feature = "serde")] use serde_with::{DeserializeFromStr, 
SerializeDisplay}; diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index eb55d6e9bc4a..f17ce036d15e 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -13,13 +13,13 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true -reth-network-p2p.workspace = true -reth-tasks.workspace = true -reth-provider.workspace = true reth-config.workspace = true reth-consensus.workspace = true +reth-network-p2p.workspace = true reth-network-peers.workspace = true +reth-primitives.workspace = true +reth-storage-api.workspace = true +reth-tasks.workspace = true # optional deps for the test-utils feature reth-db = { workspace = true, optional = true } @@ -33,7 +33,7 @@ alloy-rlp.workspace = true futures.workspace = true futures-util.workspace = true pin-project.workspace = true -tokio = { workspace = true, features = ["sync"] } +tokio = { workspace = true, features = ["sync", "fs", "io-util"] } tokio-stream.workspace = true tokio-util = { workspace = true, features = ["codec"] } @@ -50,6 +50,7 @@ tempfile = { workspace = true, optional = true } itertools.workspace = true [dev-dependencies] +reth-chainspec.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-db-api.workspace = true reth-consensus = { workspace = true, features = ["test-utils"] } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 7edfa098a20b..b4937eb417b9 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -13,7 +13,7 @@ use reth_network_p2p::{ error::{DownloadError, DownloadResult}, }; use reth_primitives::{BlockNumber, SealedHeader}; -use reth_provider::HeaderProvider; +use reth_storage_api::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::Ordering, @@ -604,9 +604,10 @@ mod tests { test_utils::{generate_bodies, TestBodiesClient}, }; use 
assert_matches::assert_matches; + use reth_chainspec::MAINNET; use reth_consensus::test_utils::TestConsensus; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; - use reth_primitives::{BlockBody, B256, MAINNET}; + use reth_primitives::{BlockBody, B256}; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use reth_testing_utils::{generators, generators::random_block_range}; use std::collections::HashMap; diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 345b22f71180..d8705c1b6aaa 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -45,7 +45,7 @@ impl TaskDownloader { /// use reth_consensus::Consensus; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; /// use reth_network_p2p::bodies::client::BodiesClient; - /// use reth_provider::HeaderProvider; + /// use reth_storage_api::HeaderProvider; /// use std::sync::Arc; /// /// fn t( diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 728b7c8f2f37..56e07ea674b0 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -5,13 +5,12 @@ use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, error::RequestError, - headers::client::{HeadersClient, HeadersFut, HeadersRequest}, + headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, priority::Priority, }; use reth_network_peers::PeerId; use reth_primitives::{ - BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BytesMut, Header, HeadersDirection, - SealedHeader, B256, + BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, SealedHeader, B256, }; use std::{collections::HashMap, io, path::Path}; use thiserror::Error; @@ -228,15 +227,7 @@ impl FromReader for FileClient { // add to the internal maps 
headers.insert(block.header.number, block.header.clone()); hash_to_number.insert(block_hash, block.header.number); - bodies.insert( - block_hash, - BlockBody { - transactions: block.body, - ommers: block.ommers, - withdrawals: block.withdrawals, - requests: block.requests, - }, - ); + bodies.insert(block_hash, block.into()); if log_interval == 0 { trace!(target: "downloaders::file", @@ -419,26 +410,22 @@ impl ChunkedFileReader { let new_read_bytes_target_len = chunk_target_len - old_bytes_len; // read new bytes from file - let mut reader = BytesMut::zeroed(new_read_bytes_target_len as usize); + let prev_read_bytes_len = self.chunk.len(); + self.chunk.extend(std::iter::repeat(0).take(new_read_bytes_target_len as usize)); + let reader = &mut self.chunk[prev_read_bytes_len..]; // actual bytes that have been read - let new_read_bytes_len = self.file.read_exact(&mut reader).await? as u64; + let new_read_bytes_len = self.file.read_exact(reader).await? as u64; + let next_chunk_byte_len = self.chunk.len(); // update remaining file length self.file_byte_len -= new_read_bytes_len; - let prev_read_bytes_len = self.chunk.len(); - - // read new bytes from file into chunk - self.chunk.extend_from_slice(&reader[..]); - let next_chunk_byte_len = self.chunk.len(); - debug!(target: "downloaders::file", max_chunk_byte_len=self.chunk_byte_len, prev_read_bytes_len, new_read_bytes_target_len, new_read_bytes_len, - reader_capacity=reader.capacity(), next_chunk_byte_len, remaining_file_byte_len=self.file_byte_len, "new bytes were read from file" diff --git a/crates/net/downloaders/src/file_codec_ovm_receipt.rs b/crates/net/downloaders/src/file_codec_ovm_receipt.rs index ca666fffb8ff..f18f4f19fdb0 100644 --- a/crates/net/downloaders/src/file_codec_ovm_receipt.rs +++ b/crates/net/downloaders/src/file_codec_ovm_receipt.rs @@ -22,7 +22,7 @@ use crate::{file_client::FileClientError, receipt_file_client::ReceiptWithBlockN /// /// It's recommended to use 
[`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set /// the capacity of the framed reader to the size of the file. -#[derive(Debug)] +#[derive(Debug, Default)] pub struct HackReceiptFileCodec; impl Decoder for HackReceiptFileCodec { diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index badcef3b0f76..9f98fedcd23f 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -10,16 +10,14 @@ use reth_consensus::Consensus; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ - client::{HeadersClient, HeadersRequest}, + client::{HeadersClient, HeadersDirection, HeadersRequest}, downloader::{validate_header_download, HeaderDownloader, SyncTarget}, error::{HeadersDownloaderError, HeadersDownloaderResult}, }, priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{ - BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, SealedHeader, B256, -}; +use reth_primitives::{BlockHashOrNumber, BlockNumber, GotExpected, Header, SealedHeader, B256}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::{Ordering, Reverse}, diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs index fb37cccdbe00..c77547364425 100644 --- a/crates/net/downloaders/src/receipt_file_client.rs +++ b/crates/net/downloaders/src/receipt_file_client.rs @@ -1,34 +1,83 @@ +use std::marker::PhantomData; + use futures::Future; use reth_primitives::{Receipt, Receipts}; use tokio::io::AsyncReadExt; use tokio_stream::StreamExt; -use tokio_util::codec::FramedRead; +use tokio_util::codec::{Decoder, FramedRead}; use tracing::trace; -use crate::{ - file_client::{FileClientError, FromReader}, - file_codec_ovm_receipt::HackReceiptFileCodec, -}; +use crate::file_client::{FileClientError, FromReader}; /// File 
client for reading RLP encoded receipts from file. Receipts in file must be in sequential /// order w.r.t. block number. #[derive(Debug)] -pub struct ReceiptFileClient { +pub struct ReceiptFileClient { /// The buffered receipts, read from file, as nested lists. One list per block number. pub receipts: Receipts, /// First (lowest) block number read from file. pub first_block: u64, /// Total number of receipts. Count of elements in [`Receipts`] flattened. pub total_receipts: usize, + /// marker + _marker: PhantomData, +} + +/// Constructs a file client from a reader and decoder. +pub trait FromReceiptReader { + /// Error returned by file client type. + type Error: From; + + /// Returns a decoder instance + fn decoder() -> D; + + /// Returns a file client + fn from_receipt_reader( + reader: B, + decoder: D, + num_bytes: u64, + ) -> impl Future), Self::Error>> + where + Self: Sized, + B: AsyncReadExt + Unpin; } -impl FromReader for ReceiptFileClient { - type Error = FileClientError; +impl FromReader for ReceiptFileClient +where + D: Decoder, Error = FileClientError> + + std::fmt::Debug + + Default, +{ + type Error = D::Error; + + fn from_reader( + reader: B, + num_bytes: u64, + ) -> impl Future), Self::Error>> + where + B: AsyncReadExt + Unpin, + { + Self::from_receipt_reader(reader, Self::decoder(), num_bytes) + } +} + +impl FromReceiptReader for ReceiptFileClient +where + D: Decoder, Error = FileClientError> + + std::fmt::Debug + + Default, +{ + type Error = D::Error; + + fn decoder() -> D { + Default::default() + } /// Initialize the [`ReceiptFileClient`] from bytes that have been read from file. Caution! If /// first block has no transactions, it's assumed to be the genesis block. 
- fn from_reader( + fn from_receipt_reader( reader: B, + decoder: D, num_bytes: u64, ) -> impl Future), Self::Error>> where @@ -37,13 +86,12 @@ impl FromReader for ReceiptFileClient { let mut receipts = Receipts::default(); // use with_capacity to make sure the internal buffer contains the entire chunk - let mut stream = - FramedRead::with_capacity(reader, HackReceiptFileCodec, num_bytes as usize); + let mut stream = FramedRead::with_capacity(reader, decoder, num_bytes as usize); trace!(target: "downloaders::file", target_num_bytes=num_bytes, capacity=stream.read_buffer().capacity(), - codec=?HackReceiptFileCodec, + codec=?Self::decoder(), "init decode stream" ); @@ -149,7 +197,12 @@ impl FromReader for ReceiptFileClient { ); Ok(( - Self { receipts, first_block: first_block.unwrap_or_default(), total_receipts }, + Self { + receipts, + first_block: first_block.unwrap_or_default(), + total_receipts, + _marker: Default::default(), + }, remaining_bytes, )) } @@ -170,13 +223,16 @@ mod test { use reth_primitives::hex; use reth_tracing::init_test_tracing; - use crate::file_codec_ovm_receipt::test::{ - receipt_block_1 as op_mainnet_receipt_block_1, - receipt_block_2 as op_mainnet_receipt_block_2, - receipt_block_3 as op_mainnet_receipt_block_3, - HACK_RECEIPT_ENCODED_BLOCK_1 as HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET, - HACK_RECEIPT_ENCODED_BLOCK_2 as HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET, - HACK_RECEIPT_ENCODED_BLOCK_3 as HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET, + use crate::file_codec_ovm_receipt::{ + test::{ + receipt_block_1 as op_mainnet_receipt_block_1, + receipt_block_2 as op_mainnet_receipt_block_2, + receipt_block_3 as op_mainnet_receipt_block_3, + HACK_RECEIPT_ENCODED_BLOCK_1 as HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET, + HACK_RECEIPT_ENCODED_BLOCK_2 as HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET, + HACK_RECEIPT_ENCODED_BLOCK_3 as HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET, + }, + HackReceiptFileCodec, }; use super::*; @@ -199,8 +255,12 @@ mod test { let 
encoded_byte_len = encoded_receipts.len() as u64; let reader = &mut &encoded_receipts[..]; - let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = - ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + let ( + ReceiptFileClient { receipts, first_block, total_receipts, _marker }, + _remaining_bytes, + ) = ReceiptFileClient::::from_reader(reader, encoded_byte_len) + .await + .unwrap(); // 2 non-empty receipt objects assert_eq!(2, total_receipts); @@ -227,8 +287,12 @@ mod test { let encoded_byte_len = encoded_receipts.len() as u64; let reader = &mut &encoded_receipts[..]; - let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = - ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + let ( + ReceiptFileClient { receipts, first_block, total_receipts, _marker }, + _remaining_bytes, + ) = ReceiptFileClient::::from_reader(reader, encoded_byte_len) + .await + .unwrap(); // 2 non-empty receipt objects assert_eq!(2, total_receipts); @@ -256,8 +320,12 @@ mod test { let encoded_byte_len = encoded_receipts.len() as u64; let reader = &mut &encoded_receipts[..]; - let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = - ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + let ( + ReceiptFileClient { receipts, first_block, total_receipts, _marker }, + _remaining_bytes, + ) = ReceiptFileClient::::from_reader(reader, encoded_byte_len) + .await + .unwrap(); // 4 non-empty receipt objects assert_eq!(4, total_receipts); diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index c4dc29df9eed..eb2a0b023b3f 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -11,19 +11,18 @@ repository.workspace = true workspace = true [dependencies] -reth-primitives.workspace = true -reth-net-common.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } -alloy-rlp = { 
workspace = true, features = ["derive"] } +alloy-primitives = { workspace = true, features = ["rand", "rlp"] } +alloy-rlp = { workspace = true, features = ["derive", "arrayvec"] } + futures.workspace = true thiserror.workspace = true -tokio = { workspace = true, features = ["full"] } +tokio = { workspace = true, features = ["time"] } tokio-stream.workspace = true tokio-util = { workspace = true, features = ["codec"] } pin-project.workspace = true -educe = "0.4.19" tracing.workspace = true # HeaderBytes @@ -37,7 +36,7 @@ ctr = "0.9.2" digest = "0.10.5" secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } concat-kdf = "0.1.0" -sha2 = "0.10.6" +sha2.workspace = true sha3 = "0.10.5" aes = "0.8.1" hmac = "0.12.1" diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index cc56240f4641..ad40a9da45da 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -7,17 +7,16 @@ use crate::{ ECIESError, }; use aes::{cipher::StreamCipher, Aes128, Aes256}; +use alloy_primitives::{ + bytes::{BufMut, Bytes, BytesMut}, + B128, B256, B512 as PeerId, +}; use alloy_rlp::{Encodable, Rlp, RlpEncodable, RlpMaxEncodedLen}; use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; use ctr::Ctr64BE; use digest::{crypto_common::KeyIvInit, Digest}; -use educe::Educe; use rand::{thread_rng, Rng}; use reth_network_peers::{id2pk, pk2id}; -use reth_primitives::{ - bytes::{BufMut, Bytes, BytesMut}, - B128, B256, B512 as PeerId, -}; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, PublicKey, SecretKey, SECP256K1, @@ -51,17 +50,13 @@ fn kdf(secret: B256, s1: &[u8], dest: &mut [u8]) { concat_kdf::derive_key_into::(secret.as_slice(), s1, dest).unwrap(); } -#[derive(Educe)] -#[educe(Debug)] pub struct ECIES { - #[educe(Debug(ignore))] secret_key: SecretKey, public_key: PublicKey, remote_public_key: Option, pub(crate) remote_id: Option, - #[educe(Debug(ignore))] ephemeral_secret_key: SecretKey, 
ephemeral_public_key: PublicKey, ephemeral_shared_secret: Option, @@ -70,9 +65,7 @@ pub struct ECIES { nonce: B256, remote_nonce: Option, - #[educe(Debug(ignore))] ingress_aes: Option>, - #[educe(Debug(ignore))] egress_aes: Option>, ingress_mac: Option, egress_mac: Option, @@ -83,6 +76,27 @@ pub struct ECIES { body_size: Option, } +impl core::fmt::Debug for ECIES { + #[inline] + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("ECIES") + .field("public_key", &self.public_key) + .field("remote_public_key", &self.remote_public_key) + .field("remote_id", &self.remote_id) + .field("ephemeral_public_key", &self.ephemeral_public_key) + .field("ephemeral_shared_secret", &self.ephemeral_shared_secret) + .field("remote_ephemeral_public_key", &self.remote_ephemeral_public_key) + .field("nonce", &self.nonce) + .field("remote_nonce", &self.remote_nonce) + .field("ingress_mac", &self.ingress_mac) + .field("egress_mac", &self.egress_mac) + .field("init_msg", &self.init_msg) + .field("remote_init_msg", &self.remote_init_msg) + .field("body_size", &self.body_size) + .finish() + } +} + fn split_at_mut(arr: &mut [T], idx: usize) -> Result<(&mut [T], &mut [T]), ECIESError> { if idx > arr.len() { return Err(ECIESErrorImpl::OutOfBounds { idx, len: arr.len() }.into()); @@ -721,7 +735,7 @@ impl ECIES { #[cfg(test)] mod tests { use super::*; - use reth_primitives::{b256, hex}; + use alloy_primitives::{b256, hex}; #[test] fn ecdh() { diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index 5b701f6a1d8e..1292d2eb543d 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -1,5 +1,7 @@ +//! 
This contains the main codec for `RLPx` ECIES messages + use crate::{algorithm::ECIES, ECIESError, EgressECIESValue, IngressECIESValue}; -use reth_primitives::{BytesMut, B512 as PeerId}; +use alloy_primitives::{bytes::BytesMut, B512 as PeerId}; use secp256k1::SecretKey; use std::{fmt::Debug, io}; use tokio_util::codec::{Decoder, Encoder}; @@ -7,14 +9,14 @@ use tracing::{instrument, trace}; /// Tokio codec for ECIES #[derive(Debug)] -pub(crate) struct ECIESCodec { +pub struct ECIESCodec { ecies: ECIES, state: ECIESState, } /// Current ECIES state of a connection #[derive(Clone, Copy, Debug, PartialEq, Eq)] -enum ECIESState { +pub enum ECIESState { /// The first stage of the ECIES handshake, where each side of the connection sends an auth /// message containing the ephemeral public key, signature of the public key, nonce, and other /// metadata. @@ -23,7 +25,12 @@ enum ECIESState { /// The second stage of the ECIES handshake, where each side of the connection sends an ack /// message containing the nonce and other metadata. Ack, + + /// The third stage of the ECIES handshake, where header is parsed, message integrity checks + /// performed, and message is decrypted. Header, + + /// The final stage, where the ECIES message is actually read and returned by the ECIES codec. 
Body, } @@ -43,7 +50,7 @@ impl Decoder for ECIESCodec { type Item = IngressECIESValue; type Error = ECIESError; - #[instrument(level = "trace", skip_all, fields(peer=&*format!("{:?}", self.ecies.remote_id.map(|s| s.to_string())), state=&*format!("{:?}", self.state)))] + #[instrument(level = "trace", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { loop { match self.state { @@ -115,7 +122,7 @@ impl Decoder for ECIESCodec { impl Encoder for ECIESCodec { type Error = io::Error; - #[instrument(level = "trace", skip(self, buf), fields(peer=&*format!("{:?}", self.ecies.remote_id.map(|s| s.to_string())), state=&*format!("{:?}", self.state)))] + #[instrument(level = "trace", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] fn encode(&mut self, item: EgressECIESValue, buf: &mut BytesMut) -> Result<(), Self::Error> { match item { EgressECIESValue::Auth => { diff --git a/crates/net/ecies/src/lib.rs b/crates/net/ecies/src/lib.rs index 07fb044c5cde..f766b48b21cb 100644 --- a/crates/net/ecies/src/lib.rs +++ b/crates/net/ecies/src/lib.rs @@ -16,9 +16,9 @@ pub mod util; mod error; pub use error::ECIESError; -mod codec; +pub mod codec; -use reth_primitives::{ +use alloy_primitives::{ bytes::{Bytes, BytesMut}, B512 as PeerId, }; diff --git a/crates/net/ecies/src/mac.rs b/crates/net/ecies/src/mac.rs index 30baa298c965..03847d091eed 100644 --- a/crates/net/ecies/src/mac.rs +++ b/crates/net/ecies/src/mac.rs @@ -10,11 +10,11 @@ //! For more information, refer to the [Ethereum MAC specification](https://github.com/ethereum/devp2p/blob/master/rlpx.md#mac). 
use aes::Aes256Enc; +use alloy_primitives::{B128, B256}; use block_padding::NoPadding; use cipher::BlockEncrypt; use digest::KeyInit; use generic_array::GenericArray; -use reth_primitives::{B128, B256}; use sha3::{Digest, Keccak256}; use typenum::U16; diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 02c834fe0dcf..dbd7577a35a8 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -3,12 +3,11 @@ use crate::{ codec::ECIESCodec, error::ECIESErrorImpl, ECIESError, EgressECIESValue, IngressECIESValue, }; -use futures::{ready, Sink, SinkExt}; -use reth_net_common::stream::HasRemoteAddr; -use reth_primitives::{ +use alloy_primitives::{ bytes::{Bytes, BytesMut}, B512 as PeerId, }; +use futures::{ready, Sink, SinkExt}; use secp256k1::SecretKey; use std::{ fmt::Debug, @@ -38,10 +37,10 @@ pub struct ECIESStream { impl ECIESStream where - Io: AsyncRead + AsyncWrite + Unpin + HasRemoteAddr, + Io: AsyncRead + AsyncWrite + Unpin, { /// Connect to an `ECIES` server - #[instrument(skip(transport, secret_key), fields(peer=&*format!("{:?}", transport.remote_addr())))] + #[instrument(skip(transport, secret_key))] pub async fn connect( transport: Io, secret_key: SecretKey, @@ -98,7 +97,6 @@ where } /// Listen on a just connected ECIES client - #[instrument(skip_all, fields(peer=&*format!("{:?}", transport.remote_addr())))] pub async fn incoming(transport: Io, secret_key: SecretKey) -> Result { let ecies = ECIESCodec::new_server(secret_key)?; diff --git a/crates/net/ecies/src/util.rs b/crates/net/ecies/src/util.rs index f6b30288a4ca..cffd1f19dede 100644 --- a/crates/net/ecies/src/util.rs +++ b/crates/net/ecies/src/util.rs @@ -1,7 +1,7 @@ //! Utility functions for hashing and encoding. +use alloy_primitives::B256; use hmac::{Hmac, Mac}; -use reth_primitives::B256; use sha2::{Digest, Sha256}; /// Hashes the input data with SHA256. 
diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 0ed24a64e10e..671883dae68e 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -13,9 +13,14 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-codecs-derive.workspace = true reth-primitives.workspace = true + +# ethereum +alloy-chains = { workspace = true, features = ["rlp"] } alloy-rlp = { workspace = true, features = ["derive"] } +alloy-genesis.workspace = true bytes.workspace = true derive_more.workspace = true @@ -25,28 +30,23 @@ serde = { workspace = true, optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } +proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] -reth-net-common.workspace = true reth-primitives = { workspace = true, features = ["arbitrary"] } -reth-tracing.workspace = true - -test-fuzz.workspace = true -tokio-util = { workspace = true, features = ["io", "codec"] } -rand.workspace = true - +alloy-chains = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +proptest-arbitrary-interop.workspace = true proptest-derive.workspace = true -async-stream.workspace = true +rand.workspace = true [features] -default = ["serde"] -serde = ["dep:serde"] arbitrary = [ "reth-primitives/arbitrary", + "alloy-chains/arbitrary", "dep:arbitrary", "dep:proptest", - "dep:proptest-derive", + "dep:proptest-arbitrary-interop", ] +serde = ["dep:serde"] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 97268ee642c6..f6a8b020d3ef 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -1,17 +1,10 @@ //! 
Implements the `GetBlockHeaders`, `GetBlockBodies`, `BlockHeaders`, and `BlockBodies` message //! types. +use crate::HeadersDirection; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; use reth_codecs_derive::{add_arbitrary_tests, derive_arbitrary}; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; - -#[cfg(any(test, feature = "arbitrary"))] -use proptest::{collection::vec, prelude::*}; -#[cfg(any(test, feature = "arbitrary"))] -use reth_primitives::{generate_valid_header, valid_header_strategy}; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, B256}; /// A request for a peer to return block headers starting at the requested block. /// The peer must return at most [`limit`](#structfield.limit) headers. @@ -24,7 +17,7 @@ use serde::{Deserialize, Serialize}; /// in the direction specified by [`reverse`](#structfield.reverse). #[derive_arbitrary(rlp)] #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetBlockHeaders { /// The block number or hash that the peer should start returning headers from. pub start_block: BlockHashOrNumber, @@ -44,25 +37,13 @@ pub struct GetBlockHeaders { /// The response to [`GetBlockHeaders`], containing headers if any headers were found. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(rlp, 10)] pub struct BlockHeaders( /// The requested headers. pub Vec
, ); -#[cfg(any(test, feature = "arbitrary"))] -impl proptest::arbitrary::Arbitrary for BlockHeaders { - type Parameters = (); - fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - let headers_strategy = vec(valid_header_strategy(), 0..10); // Adjust the range as needed - - headers_strategy.prop_map(BlockHeaders).boxed() - } - - type Strategy = proptest::prelude::BoxedStrategy; -} - #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for BlockHeaders { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -70,7 +51,7 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockHeaders { let mut headers = Vec::with_capacity(headers_count); for _ in 0..headers_count { - headers.push(generate_valid_header( + headers.push(reth_primitives::generate_valid_header( u.arbitrary()?, u.arbitrary()?, u.arbitrary()?, @@ -92,7 +73,7 @@ impl From> for BlockHeaders { /// A request for a peer to return block bodies for the given block hashes. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetBlockBodies( /// The block hashes to request bodies for. pub Vec, @@ -108,15 +89,9 @@ impl From> for GetBlockBodies { /// any were found. #[derive_arbitrary(rlp, 16)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlockBodies( /// The requested block bodies, each of which should correspond to a hash in the request. 
- #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" - ) - )] pub Vec, ); @@ -128,11 +103,14 @@ impl From> for BlockBodies { #[cfg(test)] mod tests { - use crate::{message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders}; + use crate::{ + message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, + HeadersDirection, + }; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ - hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, - TransactionSigned, TxKind, TxLegacy, U256, + hex, BlockHashOrNumber, Header, Signature, Transaction, TransactionSigned, TxKind, + TxLegacy, U256, }; use std::str::FromStr; diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 602356dda9b2..855bb6f462ea 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -19,14 +19,13 @@ use std::{ #[cfg(feature = "arbitrary")] use proptest::{collection::vec, prelude::*}; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; +#[cfg(feature = "arbitrary")] +use proptest_arbitrary_interop::arb; /// This informs peers of new blocks that have appeared on the network. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NewBlockHashes( /// New block hashes and the block number for each blockhash. /// Clients should request blocks using a [`GetBlockBodies`](crate::GetBlockBodies) message. @@ -50,7 +49,7 @@ impl NewBlockHashes { /// A block hash _and_ a block number. 
#[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlockHashNumber { /// The block hash pub hash: B256, @@ -73,7 +72,7 @@ impl From for Vec { /// A new block with the current total difficulty, which includes the difficulty of the returned /// block. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive_arbitrary(rlp, 25)] pub struct NewBlock { /// A new block. @@ -86,7 +85,7 @@ pub struct NewBlock { /// in a block. #[derive_arbitrary(rlp, 10)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Transactions( /// New transactions for the peer to include in its mempool. pub Vec, @@ -291,7 +290,7 @@ impl From for NewPooledTransactionHashes { /// but have not been included in a block. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NewPooledTransactionHashes66( /// Transaction hashes for new transactions that have appeared on the network. /// Clients should request the transactions with the given hashes using a @@ -308,7 +307,7 @@ impl From> for NewPooledTransactionHashes66 { /// Same as [`NewPooledTransactionHashes66`] but extends that that beside the transaction hashes, /// the node sends the transaction types and their sizes (as defined in EIP-2718) as well. 
#[derive(Clone, Debug, PartialEq, Eq, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NewPooledTransactionHashes68 { /// Transaction types for new transactions that have appeared on the network. /// @@ -351,7 +350,7 @@ impl Arbitrary for NewPooledTransactionHashes68 { .prop_flat_map(|len| { // Use the generated length to create vectors of TxType, usize, and B256 let types_vec = - vec(any::().prop_map(|ty| ty as u8), len..=len); + vec(arb::().prop_map(|ty| ty as u8), len..=len); // Map the usize values to the range 0..131072(0x20000) let sizes_vec = vec(proptest::num::usize::ANY.prop_map(|x| x % 131072), len..=len); diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs new file mode 100644 index 000000000000..607d6ba3e2cb --- /dev/null +++ b/crates/net/eth-wire-types/src/header.rs @@ -0,0 +1,353 @@ +//! Header types. + +use alloy_rlp::{Decodable, Encodable}; +use bytes::BufMut; +use reth_codecs_derive::derive_arbitrary; + +/// Represents the direction for a headers request depending on the `reverse` field of the request. +/// > The response must contain a number of block headers, of rising number when reverse is 0, +/// > falling when 1 +/// +/// Ref: +/// +/// [`HeadersDirection::Rising`] block numbers for `reverse == 0 == false` +/// [`HeadersDirection::Falling`] block numbers for `reverse == 1 == true` +/// +/// See also +#[derive_arbitrary(rlp)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub enum HeadersDirection { + /// Falling block number. + Falling, + /// Rising block number. 
+ #[default] + Rising, +} + +impl HeadersDirection { + /// Returns true for rising block numbers + pub const fn is_rising(&self) -> bool { + matches!(self, Self::Rising) + } + + /// Returns true for falling block numbers + pub const fn is_falling(&self) -> bool { + matches!(self, Self::Falling) + } + + /// Converts the bool into a direction. + /// + /// Returns: + /// + /// [`HeadersDirection::Rising`] block numbers for `reverse == 0 == false` + /// [`HeadersDirection::Falling`] block numbers for `reverse == 1 == true` + pub const fn new(reverse: bool) -> Self { + if reverse { + Self::Falling + } else { + Self::Rising + } + } +} + +impl Encodable for HeadersDirection { + fn encode(&self, out: &mut dyn BufMut) { + bool::from(*self).encode(out) + } + + fn length(&self) -> usize { + bool::from(*self).length() + } +} + +impl Decodable for HeadersDirection { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let value: bool = Decodable::decode(buf)?; + Ok(value.into()) + } +} + +impl From for HeadersDirection { + fn from(reverse: bool) -> Self { + Self::new(reverse) + } +} + +impl From for bool { + fn from(value: HeadersDirection) -> Self { + match value { + HeadersDirection::Rising => false, + HeadersDirection::Falling => true, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_rlp::{Decodable, Encodable}; + use reth_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, Header, B256, U256}; + use std::str::FromStr; + + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + fn test_encode_block_header() { + let expected = 
hex!("f901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); + let header = Header { + difficulty: U256::from(0x8ae_u64), + number: 0xd05_u64, + gas_limit: 0x115c_u64, + gas_used: 0x15b3_u64, + timestamp: 0x1a0a_u64, + extra_data: Bytes::from_str("7788").unwrap(), + ommers_hash: B256::ZERO, + state_root: B256::ZERO, + transactions_root: B256::ZERO, + receipts_root: B256::ZERO, + ..Default::default() + }; + let mut data = vec![]; + header.encode(&mut data); + assert_eq!(hex::encode(&data), hex::encode(expected)); + assert_eq!(header.length(), data.len()); + } + + // Test vector from: https://github.com/ethereum/tests/blob/f47bbef4da376a49c8fc3166f09ab8a6d182f765/BlockchainTests/ValidBlocks/bcEIP1559/baseFee.json#L15-L36 + #[test] + fn test_eip1559_block_header_hash() { + let expected_hash = + B256::from_str("6a251c7c3c5dca7b42407a3752ff48f3bbca1fab7f9868371d9918daf1988d1f") + .unwrap(); + let header = Header { + parent_hash: b256!("e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2a"), + ommers_hash: 
b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + beneficiary: address!("ba5e000000000000000000000000000000000000"), + state_root: b256!("ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7"), + transactions_root: b256!("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf"), + receipts_root: b256!("29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9"), + logs_bloom: bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), + difficulty: U256::from(0x020000), + number: 0x01_u64, + gas_limit: 0x016345785d8a0000_u64, + gas_used: 0x015534_u64, + timestamp: 0x079e, + extra_data: bytes!("42"), + mix_hash: b256!("0000000000000000000000000000000000000000000000000000000000000000"), + nonce: 0, + base_fee_per_gas: Some(0x036b_u64), + withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_root: None + }; + assert_eq!(header.hash_slow(), expected_hash); + } + + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + fn test_decode_block_header() { + let data = 
hex!("f901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); + let expected = Header { + difficulty: U256::from(0x8aeu64), + number: 0xd05u64, + gas_limit: 0x115cu64, + gas_used: 0x15b3u64, + timestamp: 0x1a0au64, + extra_data: Bytes::from_str("7788").unwrap(), + ommers_hash: B256::ZERO, + state_root: B256::ZERO, + transactions_root: B256::ZERO, + receipts_root: B256::ZERO, + ..Default::default() + }; + let header =
::decode(&mut data.as_slice()).unwrap(); + assert_eq!(header, expected); + + // make sure the hash matches + let expected_hash = + B256::from_str("8c2f2af15b7b563b6ab1e09bed0e9caade7ed730aec98b70a993597a797579a9") + .unwrap(); + assert_eq!(header.hash_slow(), expected_hash); + } + + // Test vector from: https://github.com/ethereum/tests/blob/970503935aeb76f59adfa3b3224aabf25e77b83d/BlockchainTests/ValidBlocks/bcExample/shanghaiExample.json#L15-L34 + #[test] + fn test_decode_block_header_with_withdrawals() { + let data = hex!("f9021ca018db39e19931515b30b16b3a92c292398039e31d6c267111529c3f2ba0a26c17a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa095efce3d6972874ca8b531b233b7a1d1ff0a56f08b20c8f1b89bef1b001194a5a071e515dd89e8a7973402c2e11646081b4e2209b2d3a1550df5095289dabcb3fba0ed9c51ea52c968e552e370a77a41dac98606e98b915092fb5f949d6452fce1c4b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008001887fffffffffffffff830125b882079e42a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b42188000000000000000009a027f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973"); + let expected = Header { + parent_hash: B256::from_str( + "18db39e19931515b30b16b3a92c292398039e31d6c267111529c3f2ba0a26c17", + ) + .unwrap(), + beneficiary: Address::from_str("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").unwrap(), + state_root: B256::from_str( + "95efce3d6972874ca8b531b233b7a1d1ff0a56f08b20c8f1b89bef1b001194a5", + ) + .unwrap(), + transactions_root: 
B256::from_str( + "71e515dd89e8a7973402c2e11646081b4e2209b2d3a1550df5095289dabcb3fb", + ) + .unwrap(), + receipts_root: B256::from_str( + "ed9c51ea52c968e552e370a77a41dac98606e98b915092fb5f949d6452fce1c4", + ) + .unwrap(), + number: 0x01, + gas_limit: 0x7fffffffffffffff, + gas_used: 0x0125b8, + timestamp: 0x079e, + extra_data: Bytes::from_str("42").unwrap(), + mix_hash: B256::from_str( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + ) + .unwrap(), + base_fee_per_gas: Some(0x09), + withdrawals_root: Some( + B256::from_str("27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973") + .unwrap(), + ), + ..Default::default() + }; + let header =
::decode(&mut data.as_slice()).unwrap(); + assert_eq!(header, expected); + + let expected_hash = + B256::from_str("85fdec94c534fa0a1534720f167b899d1fc268925c71c0cbf5aaa213483f5a69") + .unwrap(); + assert_eq!(header.hash_slow(), expected_hash); + } + + // Test vector from: https://github.com/ethereum/tests/blob/7e9e0940c0fcdbead8af3078ede70f969109bd85/BlockchainTests/ValidBlocks/bcExample/cancunExample.json + #[test] + fn test_decode_block_header_with_blob_fields_ef_tests() { + let data = hex!("f90221a03a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa03c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406aea04409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9cea046cab26abf1047b5b119ecc2dda1296b071766c8b1307e1381fcecc90d513d86b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008001887fffffffffffffff8302a86582079e42a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b42188000000000000000009a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b4218302000080"); + let expected = Header { + parent_hash: B256::from_str( + "3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6", + ) + .unwrap(), + ommers_hash: B256::from_str( + "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + ) + .unwrap(), + beneficiary: Address::from_str("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").unwrap(), + state_root: B256::from_str( + 
"3c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406ae", + ) + .unwrap(), + transactions_root: B256::from_str( + "4409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9ce", + ) + .unwrap(), + receipts_root: B256::from_str( + "46cab26abf1047b5b119ecc2dda1296b071766c8b1307e1381fcecc90d513d86", + ) + .unwrap(), + logs_bloom: Default::default(), + difficulty: U256::from(0), + number: 0x1, + gas_limit: 0x7fffffffffffffff, + gas_used: 0x02a865, + timestamp: 0x079e, + extra_data: Bytes::from(vec![0x42]), + mix_hash: B256::from_str( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + ) + .unwrap(), + nonce: 0, + base_fee_per_gas: Some(9), + withdrawals_root: Some( + B256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + .unwrap(), + ), + blob_gas_used: Some(0x020000), + excess_blob_gas: Some(0), + parent_beacon_block_root: None, + requests_root: None, + }; + + let header = Header::decode(&mut data.as_slice()).unwrap(); + assert_eq!(header, expected); + + let expected_hash = + B256::from_str("0x10aca3ebb4cf6ddd9e945a5db19385f9c105ede7374380c50d56384c3d233785") + .unwrap(); + assert_eq!(header.hash_slow(), expected_hash); + } + + #[test] + fn test_decode_block_header_with_blob_fields() { + // Block from devnet-7 + let data = 
hex!("f90239a013a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794f97e180c050e5ab072211ad2c213eb5aee4df134a0ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080830305988401c9c380808464c40d5499d883010c01846765746888676f312e32302e35856c696e7578a070ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f232588000000000000000007a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421808401600000"); + let expected = Header { + parent_hash: B256::from_str( + "13a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5", + ) + .unwrap(), + ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + beneficiary: address!("f97e180c050e5ab072211ad2c213eb5aee4df134"), + state_root: b256!("ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068a"), + transactions_root: b256!( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ), + receipts_root: b256!( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ), + logs_bloom: Default::default(), + difficulty: U256::from(0), + number: 0x30598, + gas_limit: 0x1c9c380, + gas_used: 0, + timestamp: 0x64c40d54, + extra_data: bytes!("d883010c01846765746888676f312e32302e35856c696e7578"), + mix_hash: 
b256!("70ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f2325"), + nonce: 0, + base_fee_per_gas: Some(7), + withdrawals_root: Some(b256!( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + )), + parent_beacon_block_root: None, + blob_gas_used: Some(0), + excess_blob_gas: Some(0x1600000), + requests_root: None, + }; + + let header = Header::decode(&mut data.as_slice()).unwrap(); + assert_eq!(header, expected); + + let expected_hash = + b256!("539c9ea0a3ca49808799d3964b8b6607037227de26bc51073c6926963127087b"); + assert_eq!(header.hash_slow(), expected_hash); + } + + #[test] + fn sanity_direction() { + let reverse = true; + assert_eq!(HeadersDirection::Falling, reverse.into()); + assert_eq!(reverse, bool::from(HeadersDirection::Falling)); + + let reverse = false; + assert_eq!(HeadersDirection::Rising, reverse.into()); + assert_eq!(reverse, bool::from(HeadersDirection::Rising)); + + let mut buf = Vec::new(); + let direction = HeadersDirection::Falling; + direction.encode(&mut buf); + assert_eq!(direction, HeadersDirection::decode(&mut buf.as_slice()).unwrap()); + + let mut buf = Vec::new(); + let direction = HeadersDirection::Rising; + direction.encode(&mut buf); + assert_eq!(direction, HeadersDirection::decode(&mut buf.as_slice()).unwrap()); + } + + #[test] + fn test_decode_block_header_with_invalid_blob_gas_used() { + // This should error because the blob_gas_used is too large + let data = 
hex!("f90242a013a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794f97e180c050e5ab072211ad2c213eb5aee4df134a0ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080830305988401c9c380808464c40d5499d883010c01846765746888676f312e32302e35856c696e7578a070ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f232588000000000000000007a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421891122334455667788998401600000"); + Header::decode(&mut data.as_slice()) + .expect_err("blob_gas_used size should make this header decoding fail"); + } +} diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs index a60fa4c8c1e9..f14bc4739824 100644 --- a/crates/net/eth-wire-types/src/lib.rs +++ b/crates/net/eth-wire-types/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![allow(clippy::needless_lifetimes)] // side effect of optimism fields #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -20,6 +18,9 @@ pub use version::EthVersion; pub mod message; pub use 
message::{EthMessage, EthMessageID, ProtocolMessage}; +pub mod header; +pub use header::*; + pub mod blocks; pub use blocks::*; diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 61ff55b1cf77..378a5f6ea95f 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -15,8 +15,6 @@ use crate::{EthVersion, SharedTransactions}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use reth_primitives::bytes::{Buf, BufMut}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; use std::{fmt::Debug, sync::Arc}; /// [`MAX_MESSAGE_SIZE`] is the maximum cap on the size of a protocol message. @@ -36,7 +34,7 @@ pub enum MessageError { /// An `eth` protocol message, containing a message ID and payload. #[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct ProtocolMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, @@ -182,7 +180,7 @@ impl From for ProtocolBroadcastMessage { /// it, `NewPooledTransactionHashes` is renamed as [`NewPooledTransactionHashes66`] and /// [`NewPooledTransactionHashes68`] is defined. #[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum EthMessage { /// Represents a Status message required for the protocol handshake. Status(Status), @@ -333,7 +331,7 @@ impl Encodable for EthBroadcastMessage { /// Represents message IDs for eth protocol messages. #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum EthMessageID { /// Status message. 
Status = 0x00, @@ -437,7 +435,7 @@ impl TryFrom for EthMessageID { /// This can represent either a request or a response, since both include a message payload and /// request id. #[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct RequestPair { /// id for the contained request or response message pub request_id: u64, diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index 72424b0bd0a7..cbe74f9642ec 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -4,13 +4,10 @@ use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ReceiptWithBloom, B256}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - /// A request for transaction receipts from the given block hashes. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetReceipts( /// The block hashes to request receipts for. pub Vec, @@ -20,15 +17,9 @@ pub struct GetReceipts( /// requested. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Receipts( /// Each receipt hash should correspond to a block hash in the request. 
- #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::collection::vec(proptest::arbitrary::any::(), 0..=50), 0..=5)" - ) - )] pub Vec>, ); diff --git a/crates/net/eth-wire-types/src/state.rs b/crates/net/eth-wire-types/src/state.rs index 5f3dc833950f..aa1e064d0818 100644 --- a/crates/net/eth-wire-types/src/state.rs +++ b/crates/net/eth-wire-types/src/state.rs @@ -4,15 +4,12 @@ use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use reth_codecs_derive::derive_arbitrary; use reth_primitives::{Bytes, B256}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - /// A request for state tree nodes corresponding to the given hashes. /// This message was removed in `eth/67`, only clients running `eth/66` or earlier will respond to /// this message. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetNodeData(pub Vec); /// The response to [`GetNodeData`], containing the state tree nodes or contract bytecode @@ -22,7 +19,7 @@ pub struct GetNodeData(pub Vec); /// This message was removed in `eth/67`. 
#[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NodeData(pub Vec); #[cfg(test)] diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index 904df65c61c7..8d4626900661 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -1,11 +1,10 @@ use crate::EthVersion; +use alloy_chains::{Chain, NamedChain}; +use alloy_genesis::Genesis; use alloy_rlp::{RlpDecodable, RlpEncodable}; +use reth_chainspec::{ChainSpec, MAINNET}; use reth_codecs_derive::derive_arbitrary; -use reth_primitives::{ - hex, Chain, ChainSpec, ForkId, Genesis, Hardfork, Head, NamedChain, B256, MAINNET, U256, -}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; +use reth_primitives::{hex, EthereumHardfork, ForkId, Head, B256, U256}; use std::fmt::{Debug, Display}; /// The status message is used in the eth protocol handshake to ensure that peers are on the same @@ -15,7 +14,7 @@ use std::fmt::{Debug, Display}; /// hash. This information should be treated as untrusted. #[derive_arbitrary(rlp)] #[derive(Copy, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Status { /// The current protocol version. For example, peers running `eth/66` would have a version of /// 66. 
@@ -142,7 +141,7 @@ impl Default for Status { blockhash: mainnet_genesis, genesis: mainnet_genesis, forkid: MAINNET - .hardfork_fork_id(Hardfork::Frontier) + .hardfork_fork_id(EthereumHardfork::Frontier) .expect("The Frontier hardfork should always exist"), } } @@ -152,8 +151,9 @@ impl Default for Status { /// /// # Example /// ``` +/// use reth_chainspec::{Chain, EthereumHardfork, MAINNET}; /// use reth_eth_wire_types::{EthVersion, Status}; -/// use reth_primitives::{Chain, Hardfork, B256, MAINNET, MAINNET_GENESIS_HASH, U256}; +/// use reth_primitives::{B256, MAINNET_GENESIS_HASH, U256}; /// /// // this is just an example status message! /// let status = Status::builder() @@ -162,7 +162,7 @@ impl Default for Status { /// .total_difficulty(U256::from(100)) /// .blockhash(B256::from(MAINNET_GENESIS_HASH)) /// .genesis(B256::from(MAINNET_GENESIS_HASH)) -/// .forkid(MAINNET.hardfork_fork_id(Hardfork::Paris).unwrap()) +/// .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Paris).unwrap()) /// .build(); /// /// assert_eq!( @@ -173,7 +173,7 @@ impl Default for Status { /// total_difficulty: U256::from(100), /// blockhash: B256::from(MAINNET_GENESIS_HASH), /// genesis: B256::from(MAINNET_GENESIS_HASH), -/// forkid: MAINNET.hardfork_fork_id(Hardfork::Paris).unwrap(), +/// forkid: MAINNET.hardfork_fork_id(EthereumHardfork::Paris).unwrap(), /// } /// ); /// ``` @@ -228,12 +228,11 @@ impl StatusBuilder { #[cfg(test)] mod tests { use crate::{EthVersion, Status}; + use alloy_genesis::Genesis; use alloy_rlp::{Decodable, Encodable}; use rand::Rng; - use reth_primitives::{ - hex, Chain, ChainSpec, ForkCondition, ForkHash, ForkId, Genesis, Hardfork, Head, - NamedChain, B256, U256, - }; + use reth_chainspec::{Chain, ChainSpec, ForkCondition, NamedChain}; + use reth_primitives::{hex, EthereumHardfork, ForkHash, ForkId, Head, B256, U256}; use std::str::FromStr; #[test] @@ -368,12 +367,12 @@ mod tests { // add a few hardforks let hardforks = vec![ - (Hardfork::Tangerine, 
ForkCondition::Block(1)), - (Hardfork::SpuriousDragon, ForkCondition::Block(2)), - (Hardfork::Byzantium, ForkCondition::Block(3)), - (Hardfork::MuirGlacier, ForkCondition::Block(5)), - (Hardfork::London, ForkCondition::Block(8)), - (Hardfork::Shanghai, ForkCondition::Timestamp(13)), + (EthereumHardfork::Tangerine, ForkCondition::Block(1)), + (EthereumHardfork::SpuriousDragon, ForkCondition::Block(2)), + (EthereumHardfork::Byzantium, ForkCondition::Block(3)), + (EthereumHardfork::MuirGlacier, ForkCondition::Block(5)), + (EthereumHardfork::London, ForkCondition::Block(8)), + (EthereumHardfork::Shanghai, ForkCondition::Timestamp(13)), ]; let mut chainspec = ChainSpec::builder().genesis(genesis).chain(Chain::from_id(1337)); diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index d0a42d49beec..a5bf40b798b5 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -7,13 +7,10 @@ use reth_primitives::{ transaction::TransactionConversionError, PooledTransactionsElement, TransactionSigned, B256, }; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - /// A list of transaction hashes that the peer would like transaction bodies for. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetPooledTransactions( /// The transaction hashes to request transaction bodies for. pub Vec, @@ -48,7 +45,7 @@ where Deref, Constructor, )] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct PooledTransactions( /// The transaction bodies, each of which should correspond to a requested hash. 
pub Vec, diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 852cc74f9a1d..2846c0f7cf02 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -13,11 +13,11 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-codecs.workspace = true reth-primitives.workspace = true reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } -reth-discv4.workspace = true reth-eth-wire-types.workspace = true reth-network-peers.workspace = true @@ -28,7 +28,7 @@ bytes.workspace = true derive_more.workspace = true thiserror.workspace = true serde = { workspace = true, optional = true } -tokio = { workspace = true, features = ["full"] } +tokio = { workspace = true, features = ["macros", "net", "sync", "time"] } tokio-util = { workspace = true, features = ["io", "codec"] } futures.workspace = true tokio-stream.workspace = true @@ -38,15 +38,13 @@ snap = "1.0.5" # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } -proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } [dev-dependencies] -reth-net-common.workspace = true reth-primitives = { workspace = true, features = ["arbitrary"] } reth-tracing.workspace = true test-fuzz.workspace = true +tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } tokio-util = { workspace = true, features = ["io", "codec"] } rand.workspace = true secp256k1 = { workspace = true, features = [ @@ -57,19 +55,18 @@ secp256k1 = { workspace = true, features = [ arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +proptest-arbitrary-interop.workspace = true proptest-derive.workspace = true async-stream.workspace = true +serde.workspace = true [features] -default = ["serde"] -serde = ["dep:serde"] arbitrary = [ "reth-primitives/arbitrary", + "reth-eth-wire-types/arbitrary", "dep:arbitrary", - "dep:proptest", - 
"dep:proptest-derive", ] -optimism = ["reth-primitives/optimism"] +serde = ["dep:serde", "reth-eth-wire-types/serde"] [[test]] name = "fuzz_roundtrip" diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index b7dd3e257cd5..011135ce0494 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -129,22 +129,6 @@ impl<'a> arbitrary::Arbitrary<'a> for Capability { } } -#[cfg(any(test, feature = "arbitrary"))] -impl proptest::arbitrary::Arbitrary for Capability { - type Parameters = proptest::arbitrary::ParamsFor; - fn arbitrary_with(args: Self::Parameters) -> Self::Strategy { - use proptest::strategy::Strategy; - proptest::arbitrary::any_with::(args) // TODO: what possible values? - .prop_flat_map(move |name| { - proptest::arbitrary::any_with::(()) // TODO: What's the max? - .prop_map(move |version| Self::new(name.clone(), version)) - }) - .boxed() - } - - type Strategy = proptest::strategy::BoxedStrategy; -} - /// Represents all capabilities of a node. 
#[derive(Debug, Clone, Eq, PartialEq)] pub struct Capabilities { diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 611c10e7425e..1bd803b53226 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -3,7 +3,8 @@ use crate::{ errors::P2PStreamError, message::MessageError, version::ParseVersionError, DisconnectReason, }; -use reth_primitives::{Chain, GotExpected, GotExpectedBoxed, ValidationError, B256}; +use reth_chainspec::Chain; +use reth_primitives::{GotExpected, GotExpectedBoxed, ValidationError, B256}; use std::io; /// Errors when sending/receiving messages diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 3d987dad8f87..4ee802beb6ab 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -348,14 +348,15 @@ mod tests { use crate::{ broadcast::BlockHashNumber, errors::{EthHandshakeError, EthStreamError}, + hello::DEFAULT_TCP_PORT, p2pstream::{ProtocolVersion, UnauthedP2PStream}, EthMessage, EthStream, EthVersion, HelloMessageWithProtocols, PassthroughCodec, Status, }; use futures::{SinkExt, StreamExt}; - use reth_discv4::DEFAULT_DISCOVERY_PORT; + use reth_chainspec::NamedChain; use reth_ecies::stream::ECIESStream; use reth_network_peers::pk2id; - use reth_primitives::{ForkFilter, Head, NamedChain, B256, U256}; + use reth_primitives::{ForkFilter, Head, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; use std::time::Duration; use tokio::net::{TcpListener, TcpStream}; @@ -623,7 +624,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), protocols: vec![EthVersion::Eth67.into()], - port: DEFAULT_DISCOVERY_PORT, + port: DEFAULT_TCP_PORT, id: pk2id(&server_key.public_key(SECP256K1)), }; @@ -651,7 +652,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), protocols: vec![EthVersion::Eth67.into()], - 
port: DEFAULT_DISCOVERY_PORT, + port: DEFAULT_TCP_PORT, id: pk2id(&client_key.public_key(SECP256K1)), }; diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index fbdffecec38a..2e95e2c7e4e9 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -1,10 +1,14 @@ use crate::{capability::Capability, EthVersion, ProtocolVersion}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::derive_arbitrary; -use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_network_peers::PeerId; use reth_primitives::constants::RETH_CLIENT_VERSION; +/// The default tcp port for p2p. +/// +/// Note: this is the same as discovery port: `DEFAULT_DISCOVERY_PORT` +pub(crate) const DEFAULT_TCP_PORT: u16 = 30303; + use crate::protocol::Protocol; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -29,6 +33,8 @@ pub struct HelloMessageWithProtocols { /// The list of supported capabilities and their versions. pub protocols: Vec, /// The port that the client is listening on, zero indicates the client is not listening. + /// + /// By default this is `30303` which is the same as the default discovery port. pub port: u16, /// The secp256k1 public key corresponding to the node's private key. 
pub id: PeerId, @@ -200,7 +206,7 @@ impl HelloMessageBuilder { protocols: protocols.unwrap_or_else(|| { vec![EthVersion::Eth68.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()] }), - port: port.unwrap_or(DEFAULT_DISCOVERY_PORT), + port: port.unwrap_or(DEFAULT_TCP_PORT), id, } } @@ -208,14 +214,12 @@ impl HelloMessageBuilder { #[cfg(test)] mod tests { - use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; - use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_network_peers::pk2id; - use secp256k1::{SecretKey, SECP256K1}; - use crate::{ capability::Capability, p2pstream::P2PMessage, EthVersion, HelloMessage, ProtocolVersion, }; + use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; + use reth_network_peers::pk2id; + use secp256k1::{SecretKey, SECP256K1}; #[test] fn test_hello_encoding_round_trip() { @@ -225,7 +229,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new_static("eth", EthVersion::Eth67 as usize)], - port: DEFAULT_DISCOVERY_PORT, + port: 30303, id, }); @@ -245,7 +249,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new_static("eth", EthVersion::Eth67 as usize)], - port: DEFAULT_DISCOVERY_PORT, + port: 30303, id, }); @@ -264,7 +268,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new_static("eth", EthVersion::Eth67 as usize)], - port: DEFAULT_DISCOVERY_PORT, + port: 30303, id, }); diff --git a/crates/net/eth-wire/src/lib.rs b/crates/net/eth-wire/src/lib.rs index 3830baa1b7e5..e96a27077f8c 100644 --- a/crates/net/eth-wire/src/lib.rs +++ b/crates/net/eth-wire/src/lib.rs @@ -11,8 +11,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged 
-#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod capability; diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 84d43e8a87af..64d10b3c5033 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -312,13 +312,14 @@ impl ProtocolProxy { return Err(io::ErrorKind::InvalidInput.into()); } - let mut masked_bytes = BytesMut::zeroed(msg.len()); - masked_bytes[0] = msg[0] - .checked_add(self.shared_cap.relative_message_id_offset()) - .ok_or(io::ErrorKind::InvalidInput)?; + let offset = self.shared_cap.relative_message_id_offset(); + if offset == 0 { + return Ok(msg); + } - masked_bytes[1..].copy_from_slice(&msg[1..]); - Ok(masked_bytes.freeze()) + let mut masked = Vec::from(msg); + masked[0] = masked[0].checked_add(offset).ok_or(io::ErrorKind::InvalidInput)?; + Ok(masked.into()) } /// Unmasks the message ID of a message received from the wire. diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 7312a891e5db..717b79f9f437 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -478,11 +478,10 @@ where // // It's possible we already tried to RLP decode this, but it was snappy // compressed, so we need to RLP decode it again. - let reason = DisconnectReason::decode(&mut &decompress_buf[1..]).map_err(|err| { + let reason = DisconnectReason::decode(&mut &decompress_buf[1..]).inspect_err(|err| { debug!( %err, msg=%hex::encode(&decompress_buf[1..]), "Failed to decode disconnect message from peer" ); - err })?; return Poll::Ready(Some(Err(P2PStreamError::Disconnected(reason)))); } diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index c87e2c6c2200..8040eb5de2d0 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -1,11 +1,12 @@ //! Utilities for testing p2p protocol. 
use crate::{ - EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, Status, UnauthedP2PStream, + hello::DEFAULT_TCP_PORT, EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, + Status, UnauthedP2PStream, }; -use reth_discv4::DEFAULT_DISCOVERY_PORT; +use reth_chainspec::Chain; use reth_network_peers::pk2id; -use reth_primitives::{Chain, ForkFilter, Head, B256, U256}; +use reth_primitives::{ForkFilter, Head, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; use std::net::SocketAddr; use tokio::net::TcpStream; @@ -21,7 +22,7 @@ pub fn eth_hello() -> (HelloMessageWithProtocols, SecretKey) { protocol_version: ProtocolVersion::V5, client_version: "eth/1.0.0".to_string(), protocols, - port: DEFAULT_DISCOVERY_PORT, + port: DEFAULT_TCP_PORT, id: pk2id(&server_key.public_key(SECP256K1)), }; (hello, server_key) diff --git a/crates/net/eth-wire/tests/fuzz_roundtrip.rs b/crates/net/eth-wire/tests/fuzz_roundtrip.rs index f20d0397c2b6..ec55fc448ae0 100644 --- a/crates/net/eth-wire/tests/fuzz_roundtrip.rs +++ b/crates/net/eth-wire/tests/fuzz_roundtrip.rs @@ -1,8 +1,5 @@ //! Round-trip encoding fuzzing for the `eth-wire` crate. 
-// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] - use alloy_rlp::{Decodable, Encodable}; use serde::Serialize; use std::fmt::Debug; diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 634c45a79c7b..bedaf2c297ce 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-eth-wire.workspace = true -reth-rpc-types.workspace = true +alloy-rpc-types-admin.workspace = true reth-network-peers.workspace = true # ethereum diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 97bd784065e8..8efaec5f0fb7 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -13,13 +13,13 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use reth_eth_wire::{capability::Capabilities, DisconnectReason, EthVersion, Status}; -use reth_rpc_types::NetworkStatus; -use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; - +pub use alloy_rpc_types_admin::EthProtocolInfo; pub use error::NetworkError; pub use reputation::{Reputation, ReputationChangeKind}; +use reth_eth_wire::{capability::Capabilities, DisconnectReason, EthVersion, Status}; use reth_network_peers::NodeRecord; +use serde::{Deserialize, Serialize}; +use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; /// The `PeerId` type. pub type PeerId = alloy_primitives::B512; @@ -66,9 +66,14 @@ pub trait PeersInfo: Send + Sync { /// Provides an API for managing the peers of the network. pub trait Peers: PeersInfo { - /// Adds a peer to the peer set. - fn add_peer(&self, peer: PeerId, addr: SocketAddr) { - self.add_peer_kind(peer, PeerKind::Basic, addr); + /// Adds a peer to the peer set with UDP `SocketAddr`. 
+ fn add_peer(&self, peer: PeerId, tcp_addr: SocketAddr) { + self.add_peer_kind(peer, PeerKind::Static, tcp_addr, None); + } + + /// Adds a peer to the peer set with TCP and UDP `SocketAddr`. + fn add_peer_with_udp(&self, peer: PeerId, tcp_addr: SocketAddr, udp_addr: SocketAddr) { + self.add_peer_kind(peer, PeerKind::Static, tcp_addr, Some(udp_addr)); } /// Adds a trusted [`PeerId`] to the peer set. @@ -76,13 +81,24 @@ pub trait Peers: PeersInfo { /// This allows marking a peer as trusted without having to know the peer's address. fn add_trusted_peer_id(&self, peer: PeerId); - /// Adds a trusted peer to the peer set. - fn add_trusted_peer(&self, peer: PeerId, addr: SocketAddr) { - self.add_peer_kind(peer, PeerKind::Trusted, addr); + /// Adds a trusted peer to the peer set with UDP `SocketAddr`. + fn add_trusted_peer(&self, peer: PeerId, tcp_addr: SocketAddr) { + self.add_peer_kind(peer, PeerKind::Trusted, tcp_addr, None); + } + + /// Adds a trusted peer with TCP and UDP `SocketAddr` to the peer set. + fn add_trusted_peer_with_udp(&self, peer: PeerId, tcp_addr: SocketAddr, udp_addr: SocketAddr) { + self.add_peer_kind(peer, PeerKind::Trusted, tcp_addr, Some(udp_addr)); } /// Adds a peer to the known peer set, with the given kind. - fn add_peer_kind(&self, peer: PeerId, kind: PeerKind, addr: SocketAddr); + fn add_peer_kind( + &self, + peer: PeerId, + kind: PeerKind, + tcp_addr: SocketAddr, + udp_addr: Option, + ); /// Returns the rpc [`PeerInfo`] for all connected [`PeerKind::Trusted`] peers. fn get_trusted_peers( @@ -147,6 +163,8 @@ pub enum PeerKind { /// Basic peer kind. #[default] Basic, + /// Static peer, added via JSON-RPC. + Static, /// Trusted peer. Trusted, } @@ -157,6 +175,11 @@ impl PeerKind { matches!(self, Self::Trusted) } + /// Returns `true` if the peer is static. + pub const fn is_static(&self) -> bool { + matches!(self, Self::Static) + } + /// Returns `true` if the peer is basic. 
pub const fn is_basic(&self) -> bool { matches!(self, Self::Basic) @@ -172,6 +195,10 @@ pub struct PeerInfo { pub remote_id: PeerId, /// The client's name and version pub client_version: Arc, + /// The peer's enode + pub enode: String, + /// The peer's enr + pub enr: Option, /// The peer's address we're connected to pub remote_addr: SocketAddr, /// The local address of the connection @@ -184,6 +211,8 @@ pub struct PeerInfo { pub status: Arc, /// The timestamp when the session to that peer has been established. pub session_established: Instant, + /// The peer's connection kind + pub kind: PeerKind, } /// The direction of the connection. @@ -215,3 +244,14 @@ impl std::fmt::Display for Direction { } } } + +/// The status of the network being ran by the local node. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct NetworkStatus { + /// The local node client version. + pub client_version: String, + /// The current ethereum protocol version + pub protocol_version: u64, + /// Information about the Ethereum Wire Protocol. + pub eth_protocol_info: EthProtocolInfo, +} diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index 0678b928857d..a74204a3f742 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -4,13 +4,13 @@ //! generic over it. use crate::{ - NetworkError, NetworkInfo, PeerId, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, - ReputationChangeKind, + NetworkError, NetworkInfo, NetworkStatus, PeerId, PeerInfo, PeerKind, Peers, PeersInfo, + Reputation, ReputationChangeKind, }; +use alloy_rpc_types_admin::EthProtocolInfo; use enr::{secp256k1::SecretKey, Enr}; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; use reth_network_peers::NodeRecord; -use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use std::net::{IpAddr, SocketAddr}; /// A type that implements all network trait that does nothing. 
@@ -71,7 +71,14 @@ impl PeersInfo for NoopNetwork { impl Peers for NoopNetwork { fn add_trusted_peer_id(&self, _peer: PeerId) {} - fn add_peer_kind(&self, _peer: PeerId, _kind: PeerKind, _addr: SocketAddr) {} + fn add_peer_kind( + &self, + _peer: PeerId, + _kind: PeerKind, + _tcp_addr: SocketAddr, + _udp_addr: Option, + ) { + } async fn get_peers_by_kind(&self, _kind: PeerKind) -> Result, NetworkError> { Ok(vec![]) diff --git a/crates/net/network-types/Cargo.toml b/crates/net/network-types/Cargo.toml new file mode 100644 index 000000000000..66c1f4d84a38 --- /dev/null +++ b/crates/net/network-types/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "reth-network-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Commonly used network types" + +[lints] +workspace = true + +[dependencies] +# reth +reth-network-api.workspace = true +reth-network-peers.workspace = true +reth-net-banlist.workspace = true + +# io +serde = { workspace = true, optional = true } +humantime-serde = { workspace = true, optional = true } +serde_json = { workspace = true } + +# misc +tracing.workspace = true + +[features] +serde = ["dep:serde", "dep:humantime-serde"] +test-utils = [] diff --git a/crates/net/network-types/src/backoff.rs b/crates/net/network-types/src/backoff.rs new file mode 100644 index 000000000000..8ee9f68a4e31 --- /dev/null +++ b/crates/net/network-types/src/backoff.rs @@ -0,0 +1,27 @@ +/// Describes the type of backoff should be applied. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum BackoffKind { + /// Use the lowest configured backoff duration. + /// + /// This applies to connection problems where there is a chance that they will be resolved + /// after the short duration. 
+ Low, + /// Use a slightly higher duration to put a peer in timeout + /// + /// This applies to more severe connection problems where there is a lower chance that they + /// will be resolved. + Medium, + /// Use the max configured backoff duration. + /// + /// This is intended for spammers, or bad peers in general. + High, +} + +// === impl BackoffKind === + +impl BackoffKind { + /// Returns true if the backoff is considered severe. + pub const fn is_severe(&self) -> bool { + matches!(self, Self::Medium | Self::High) + } +} diff --git a/crates/net/network-types/src/lib.rs b/crates/net/network-types/src/lib.rs new file mode 100644 index 000000000000..5b075d609bc1 --- /dev/null +++ b/crates/net/network-types/src/lib.rs @@ -0,0 +1,24 @@ +//! Commonly used networking types. +//! +//! ## Feature Flags +//! +//! - `serde` (default): Enable serde support + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// Types related to peering. +pub mod peers; +pub use peers::{ConnectionsConfig, PeersConfig, ReputationChangeWeights}; + +pub mod session; +pub use session::{SessionLimits, SessionsConfig}; + +/// [`BackoffKind`] definition. +mod backoff; +pub use backoff::BackoffKind; diff --git a/crates/net/network-types/src/peers/config.rs b/crates/net/network-types/src/peers/config.rs new file mode 100644 index 000000000000..5143c4c6f2bf --- /dev/null +++ b/crates/net/network-types/src/peers/config.rs @@ -0,0 +1,292 @@ +//! Configuration for peering. 
+ +use crate::{BackoffKind, ReputationChangeWeights}; +use reth_net_banlist::BanList; +use reth_network_peers::NodeRecord; +use std::{ + collections::HashSet, + io::{self, ErrorKind}, + path::Path, + time::Duration, +}; +use tracing::info; + +/// Maximum number of available slots for outbound sessions. +pub const DEFAULT_MAX_COUNT_PEERS_OUTBOUND: u32 = 100; + +/// Maximum number of available slots for inbound sessions. +pub const DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30; + +/// Maximum number of available slots for concurrent outgoing dials. +/// +/// This restricts how many outbound dials can be performed concurrently. +pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15; + +/// The durations to use when a backoff should be applied to a peer. +/// +/// See also [`BackoffKind`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct PeerBackoffDurations { + /// Applies to connection problems where there is a chance that they will be resolved after the + /// short duration. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub low: Duration, + /// Applies to more severe connection problems where there is a lower chance that they will be + /// resolved. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub medium: Duration, + /// Intended for spammers, or bad peers in general. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub high: Duration, + /// Maximum total backoff duration. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub max: Duration, +} + +impl PeerBackoffDurations { + /// Returns the corresponding [`Duration`] + pub const fn backoff(&self, kind: BackoffKind) -> Duration { + match kind { + BackoffKind::Low => self.low, + BackoffKind::Medium => self.medium, + BackoffKind::High => self.high, + } + } + + /// Returns the timestamp until which we should backoff. 
+ /// + /// The Backoff duration is capped by the configured maximum backoff duration. + pub fn backoff_until(&self, kind: BackoffKind, backoff_counter: u8) -> std::time::Instant { + let backoff_time = self.backoff(kind); + let backoff_time = backoff_time + backoff_time * backoff_counter as u32; + let now = std::time::Instant::now(); + now + backoff_time.min(self.max) + } + + /// Returns durations for testing. + #[cfg(any(test, feature = "test-utils"))] + pub const fn test() -> Self { + Self { + low: Duration::from_millis(200), + medium: Duration::from_millis(200), + high: Duration::from_millis(200), + max: Duration::from_millis(200), + } + } +} + +impl Default for PeerBackoffDurations { + fn default() -> Self { + Self { + low: Duration::from_secs(30), + // 3min + medium: Duration::from_secs(60 * 3), + // 15min + high: Duration::from_secs(60 * 15), + // 1h + max: Duration::from_secs(60 * 60), + } + } +} + +/// Tracks stats about connected nodes +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize), serde(default))] +pub struct ConnectionsConfig { + /// Maximum allowed outbound connections. + pub max_outbound: usize, + /// Maximum allowed inbound connections. + pub max_inbound: usize, + /// Maximum allowed concurrent outbound dials. + #[cfg_attr(feature = "serde", serde(default))] + pub max_concurrent_outbound_dials: usize, +} + +impl Default for ConnectionsConfig { + fn default() -> Self { + Self { + max_outbound: DEFAULT_MAX_COUNT_PEERS_OUTBOUND as usize, + max_inbound: DEFAULT_MAX_COUNT_PEERS_INBOUND as usize, + max_concurrent_outbound_dials: DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS, + } + } +} + +/// Config type for initiating a `PeersManager` instance. 
+#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(default))] +pub struct PeersConfig { + /// How often to recheck free slots for outbound connections. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub refill_slots_interval: Duration, + /// Trusted nodes to connect to or accept from + pub trusted_nodes: HashSet, + /// Connect to or accept from trusted nodes only? + #[cfg_attr(feature = "serde", serde(alias = "connect_trusted_nodes_only"))] + pub trusted_nodes_only: bool, + /// Maximum number of backoff attempts before we give up on a peer and dropping. + /// + /// The max time spent of a peer before it's removed from the set is determined by the + /// configured backoff duration and the max backoff count. + /// + /// With a backoff counter of 5 and a backoff duration of 1h, the minimum time spent of the + /// peer in the table is the sum of all backoffs (1h + 2h + 3h + 4h + 5h = 15h). + /// + /// Note: this does not apply to trusted peers. + pub max_backoff_count: u8, + /// Basic nodes to connect to. + #[cfg_attr(feature = "serde", serde(skip))] + pub basic_nodes: HashSet, + /// How long to ban bad peers. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub ban_duration: Duration, + /// Restrictions on `PeerIds` and Ips. + #[cfg_attr(feature = "serde", serde(skip))] + pub ban_list: BanList, + /// Restrictions on connections. + pub connection_info: ConnectionsConfig, + /// How to weigh reputation changes. + pub reputation_weights: ReputationChangeWeights, + /// How long to backoff peers that we are failed to connect to for non-fatal reasons. + /// + /// The backoff duration increases with number of backoff attempts. 
+ pub backoff_durations: PeerBackoffDurations, +} + +impl Default for PeersConfig { + fn default() -> Self { + Self { + refill_slots_interval: Duration::from_millis(5_000), + connection_info: Default::default(), + reputation_weights: Default::default(), + ban_list: Default::default(), + // Ban peers for 12h + ban_duration: Duration::from_secs(60 * 60 * 12), + backoff_durations: Default::default(), + trusted_nodes: Default::default(), + trusted_nodes_only: false, + basic_nodes: Default::default(), + max_backoff_count: 5, + } + } +} + +impl PeersConfig { + /// A set of `peer_ids` and ip addr that we want to never connect to + pub fn with_ban_list(mut self, ban_list: BanList) -> Self { + self.ban_list = ban_list; + self + } + + /// Configure how long to ban bad peers + pub const fn with_ban_duration(mut self, ban_duration: Duration) -> Self { + self.ban_duration = ban_duration; + self + } + + /// Maximum allowed outbound connections. + pub const fn with_max_outbound(mut self, max_outbound: usize) -> Self { + self.connection_info.max_outbound = max_outbound; + self + } + + /// Maximum allowed inbound connections with optional update. + pub const fn with_max_inbound_opt(mut self, max_inbound: Option) -> Self { + if let Some(max_inbound) = max_inbound { + self.connection_info.max_inbound = max_inbound; + } + self + } + + /// Maximum allowed outbound connections with optional update. + pub const fn with_max_outbound_opt(mut self, max_outbound: Option) -> Self { + if let Some(max_outbound) = max_outbound { + self.connection_info.max_outbound = max_outbound; + } + self + } + + /// Maximum allowed inbound connections. + pub const fn with_max_inbound(mut self, max_inbound: usize) -> Self { + self.connection_info.max_inbound = max_inbound; + self + } + + /// Maximum allowed concurrent outbound dials. 
+ pub const fn with_max_concurrent_dials(mut self, max_concurrent_outbound_dials: usize) -> Self { + self.connection_info.max_concurrent_outbound_dials = max_concurrent_outbound_dials; + self + } + + /// Nodes to always connect to. + pub fn with_trusted_nodes(mut self, nodes: HashSet) -> Self { + self.trusted_nodes = nodes; + self + } + + /// Connect only to trusted nodes. + pub const fn with_trusted_nodes_only(mut self, trusted_only: bool) -> Self { + self.trusted_nodes_only = trusted_only; + self + } + + /// Nodes available at launch. + pub fn with_basic_nodes(mut self, nodes: HashSet) -> Self { + self.basic_nodes = nodes; + self + } + + /// Configures the max allowed backoff count. + pub const fn with_max_backoff_count(mut self, max_backoff_count: u8) -> Self { + self.max_backoff_count = max_backoff_count; + self + } + + /// Configures how to weigh reputation changes. + pub const fn with_reputation_weights( + mut self, + reputation_weights: ReputationChangeWeights, + ) -> Self { + self.reputation_weights = reputation_weights; + self + } + + /// Configures how long to backoff peers that are we failed to connect to for non-fatal reasons + pub const fn with_backoff_durations(mut self, backoff_durations: PeerBackoffDurations) -> Self { + self.backoff_durations = backoff_durations; + self + } + + /// Returns the maximum number of peers, inbound and outbound. + pub const fn max_peers(&self) -> usize { + self.connection_info.max_outbound + self.connection_info.max_inbound + } + + /// Read from file nodes available at launch. Ignored if None. 
+ pub fn with_basic_nodes_from_file( + self, + optional_file: Option>, + ) -> Result { + let Some(file_path) = optional_file else { return Ok(self) }; + let reader = match std::fs::File::open(file_path.as_ref()) { + Ok(file) => io::BufReader::new(file), + Err(e) if e.kind() == ErrorKind::NotFound => return Ok(self), + Err(e) => Err(e)?, + }; + info!(target: "net::peers", file = %file_path.as_ref().display(), "Loading saved peers"); + let nodes: HashSet = serde_json::from_reader(reader)?; + Ok(self.with_basic_nodes(nodes)) + } + + /// Returns settings for testing + #[cfg(any(test, feature = "test-utils"))] + pub fn test() -> Self { + Self { + refill_slots_interval: Duration::from_millis(100), + backoff_durations: PeerBackoffDurations::test(), + ..Default::default() + } + } +} diff --git a/crates/net/network-types/src/peers/mod.rs b/crates/net/network-types/src/peers/mod.rs new file mode 100644 index 000000000000..4b195750b516 --- /dev/null +++ b/crates/net/network-types/src/peers/mod.rs @@ -0,0 +1,5 @@ +pub mod reputation; +pub use reputation::ReputationChangeWeights; + +pub mod config; +pub use config::{ConnectionsConfig, PeersConfig}; diff --git a/crates/net/network/src/peers/reputation.rs b/crates/net/network-types/src/peers/reputation.rs similarity index 92% rename from crates/net/network/src/peers/reputation.rs rename to crates/net/network-types/src/peers/reputation.rs index 9d3ec256bea0..13fac4c1ebc2 100644 --- a/crates/net/network/src/peers/reputation.rs +++ b/crates/net/network-types/src/peers/reputation.rs @@ -3,13 +3,13 @@ use reth_network_api::{Reputation, ReputationChangeKind}; /// The default reputation of a peer -pub(crate) const DEFAULT_REPUTATION: Reputation = 0; +pub const DEFAULT_REPUTATION: Reputation = 0; /// The minimal unit we're measuring reputation const REPUTATION_UNIT: i32 = -1024; /// The reputation value below which new connection from/to peers are rejected. 
-pub(crate) const BANNED_REPUTATION: i32 = 50 * REPUTATION_UNIT; +pub const BANNED_REPUTATION: i32 = 50 * REPUTATION_UNIT; /// The reputation change to apply to a peer that dropped the connection. const REMOTE_DISCONNECT_REPUTATION_CHANGE: i32 = 4 * REPUTATION_UNIT; @@ -42,11 +42,11 @@ const BAD_ANNOUNCEMENT_REPUTATION_CHANGE: i32 = REPUTATION_UNIT; /// This gives a trusted peer more leeway when interacting with the node, which is useful for in /// custom setups. By not setting this to `0` we still allow trusted peer penalization but less than /// untrusted peers. -pub(crate) const MAX_TRUSTED_PEER_REPUTATION_CHANGE: Reputation = 2 * REPUTATION_UNIT; +pub const MAX_TRUSTED_PEER_REPUTATION_CHANGE: Reputation = 2 * REPUTATION_UNIT; /// Returns `true` if the given reputation is below the [`BANNED_REPUTATION`] threshold #[inline] -pub(crate) const fn is_banned_reputation(reputation: i32) -> bool { +pub const fn is_banned_reputation(reputation: i32) -> bool { reputation < BANNED_REPUTATION } @@ -80,7 +80,7 @@ pub struct ReputationChangeWeights { impl ReputationChangeWeights { /// Returns the quantifiable [`ReputationChange`] for the given [`ReputationChangeKind`] using /// the configured weights - pub(crate) fn change(&self, kind: ReputationChangeKind) -> ReputationChange { + pub fn change(&self, kind: ReputationChangeKind) -> ReputationChange { match kind { ReputationChangeKind::BadMessage => self.bad_message.into(), ReputationChangeKind::BadBlock => self.bad_block.into(), @@ -115,14 +115,14 @@ impl Default for ReputationChangeWeights { /// Represents a change in a peer's reputation. 
#[derive(Debug, Copy, Clone, Default)] -pub(crate) struct ReputationChange(Reputation); +pub struct ReputationChange(Reputation); // === impl ReputationChange === impl ReputationChange { /// Helper type for easier conversion #[inline] - pub(crate) const fn as_i32(self) -> Reputation { + pub const fn as_i32(self) -> Reputation { self.0 } } diff --git a/crates/net/network/src/session/config.rs b/crates/net/network-types/src/session/config.rs similarity index 65% rename from crates/net/network/src/session/config.rs rename to crates/net/network-types/src/session/config.rs index 98136ee80a10..941448effd6b 100644 --- a/crates/net/network/src/session/config.rs +++ b/crates/net/network-types/src/session/config.rs @@ -1,9 +1,6 @@ -//! Configuration types for [`SessionManager`](crate::session::SessionManager). +//! Configuration types for peer sessions manager. -use crate::{ - peers::{DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND}, - session::{Direction, ExceedsSessionLimit}, -}; +use crate::peers::config::{DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND}; use std::time::Duration; /// Default request timeout for a single request. @@ -29,7 +26,7 @@ const DEFAULT_MAX_PEERS: usize = /// With maxed out peers, this will allow for 3 messages per session (average) const DEFAULT_SESSION_EVENT_BUFFER_SIZE: usize = DEFAULT_MAX_PEERS * 2; -/// Configuration options when creating a [`SessionManager`](crate::session::SessionManager). +/// Configuration options for peer session management. 
#[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "serde", serde(default))] @@ -111,10 +108,14 @@ impl SessionsConfig { #[derive(Debug, Clone, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct SessionLimits { - max_pending_inbound: Option, - max_pending_outbound: Option, - max_established_inbound: Option, - max_established_outbound: Option, + /// Maximum allowed inbound connections. + pub max_pending_inbound: Option, + /// Maximum allowed outbound connections. + pub max_pending_outbound: Option, + /// Maximum allowed established inbound connections. + pub max_established_inbound: Option, + /// Maximum allowed established outbound connections. + pub max_established_outbound: Option, } impl SessionLimits { @@ -143,107 +144,10 @@ impl SessionLimits { } } -/// Keeps track of all sessions. -#[derive(Debug, Clone)] -pub struct SessionCounter { - /// Limits to enforce. - limits: SessionLimits, - /// Number of pending incoming sessions. - pending_inbound: u32, - /// Number of pending outgoing sessions. - pending_outbound: u32, - /// Number of active inbound sessions. - active_inbound: u32, - /// Number of active outbound sessions. 
- active_outbound: u32, -} - -// === impl SessionCounter === - -impl SessionCounter { - pub(crate) const fn new(limits: SessionLimits) -> Self { - Self { - limits, - pending_inbound: 0, - pending_outbound: 0, - active_inbound: 0, - active_outbound: 0, - } - } - - pub(crate) fn inc_pending_inbound(&mut self) { - self.pending_inbound += 1; - } - - pub(crate) fn inc_pending_outbound(&mut self) { - self.pending_outbound += 1; - } - - pub(crate) fn dec_pending(&mut self, direction: &Direction) { - match direction { - Direction::Outgoing(_) => { - self.pending_outbound -= 1; - } - Direction::Incoming => { - self.pending_inbound -= 1; - } - } - } - - pub(crate) fn inc_active(&mut self, direction: &Direction) { - match direction { - Direction::Outgoing(_) => { - self.active_outbound += 1; - } - Direction::Incoming => { - self.active_inbound += 1; - } - } - } - - pub(crate) fn dec_active(&mut self, direction: &Direction) { - match direction { - Direction::Outgoing(_) => { - self.active_outbound -= 1; - } - Direction::Incoming => { - self.active_inbound -= 1; - } - } - } - - pub(crate) const fn ensure_pending_outbound(&self) -> Result<(), ExceedsSessionLimit> { - Self::ensure(self.pending_outbound, self.limits.max_pending_outbound) - } - - pub(crate) const fn ensure_pending_inbound(&self) -> Result<(), ExceedsSessionLimit> { - Self::ensure(self.pending_inbound, self.limits.max_pending_inbound) - } - - const fn ensure(current: u32, limit: Option) -> Result<(), ExceedsSessionLimit> { - if let Some(limit) = limit { - if current >= limit { - return Err(ExceedsSessionLimit(limit)); - } - } - Ok(()) - } -} - #[cfg(test)] mod tests { use super::*; - #[test] - fn test_limits() { - let mut limits = SessionCounter::new(SessionLimits::default().with_max_pending_inbound(2)); - assert!(limits.ensure_pending_outbound().is_ok()); - limits.inc_pending_inbound(); - assert!(limits.ensure_pending_inbound().is_ok()); - limits.inc_pending_inbound(); - 
assert!(limits.ensure_pending_inbound().is_err()); - } - #[test] fn scale_session_event_buffer() { let config = SessionsConfig::default().with_upscaled_event_buffer(10); diff --git a/crates/net/network-types/src/session/mod.rs b/crates/net/network-types/src/session/mod.rs new file mode 100644 index 000000000000..a5b613189c02 --- /dev/null +++ b/crates/net/network-types/src/session/mod.rs @@ -0,0 +1,4 @@ +//! Peer sessions configuration. + +pub mod config; +pub use config::{SessionLimits, SessionsConfig}; diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index b41d3db54867..61b887f042d5 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -13,8 +13,10 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true +reth-fs-util.workspace = true reth-primitives.workspace = true -reth-net-common.workspace = true +reth-net-banlist.workspace = true reth-network-api.workspace = true reth-network-p2p.workspace = true reth-discv4.workspace = true @@ -24,11 +26,12 @@ reth-eth-wire.workspace = true reth-ecies.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true -reth-provider.workspace = true -reth-rpc-types.workspace = true +reth-storage-api.workspace = true +reth-provider = { workspace = true, optional = true } reth-tokio-util.workspace = true reth-consensus.workspace = true -reth-network-peers.workspace = true +reth-network-peers = { workspace = true, features = ["net"] } +reth-network-types.workspace = true # ethereum enr = { workspace = true, features = ["serde", "rust-secp256k1"] } @@ -55,7 +58,7 @@ metrics.workspace = true auto_impl.workspace = true aquamarine.workspace = true tracing.workspace = true -fnv = "1.0" +rustc-hash.workspace = true thiserror.workspace = true parking_lot.workspace = true rand.workspace = true @@ -75,6 +78,7 @@ reth-primitives = { workspace = true, features = ["test-utils"] } # integration tests reth-network = { workspace = true, features = 
["test-utils"] } reth-network-p2p = { workspace = true, features = ["test-utils"] } +reth-network-types = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true @@ -82,7 +86,7 @@ reth-transaction-pool = { workspace = true, features = ["test-utils"] } # alloy deps for testing against nodes alloy-node-bindings.workspace = true -alloy-provider.workspace = true +alloy-provider= { workspace = true, features = ["admin-api"] } # misc serial_test.workspace = true @@ -94,9 +98,9 @@ criterion = { workspace = true, features = ["async_tokio", "html_reports"] } [features] default = ["serde"] -serde = ["dep:serde", "dep:humantime-serde", "secp256k1/serde", "enr/serde", "dep:serde_json"] -test-utils = ["reth-provider/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils"] geth-tests = [] +serde = ["dep:serde", "dep:humantime-serde", "secp256k1/serde", "enr/serde", "dep:serde_json", "reth-network-types/serde"] +test-utils = ["dep:reth-provider", "reth-provider?/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils", "reth-network-types/test-utils"] [[bench]] name = "bench" diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 3066c9eafb46..b197fc55f8f1 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -3,20 +3,18 @@ use crate::{ error::NetworkError, import::{BlockImport, ProofOfStakeBlockImport}, - peers::PeersConfig, - session::SessionsConfig, transactions::TransactionsManagerConfig, NetworkHandle, NetworkManager, }; +use reth_chainspec::{ChainSpec, MAINNET}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; -use reth_network_peers::{pk2id, PeerId}; -use reth_primitives::{ - mainnet_nodes, sepolia_nodes, ChainSpec, 
ForkFilter, Head, TrustedPeer, MAINNET, -}; -use reth_provider::{BlockReader, HeaderProvider}; +use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; +use reth_network_types::{PeersConfig, SessionsConfig}; +use reth_primitives::{ForkFilter, Head}; +use reth_storage_api::{BlockNumReader, BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; use std::{collections::HashSet, net::SocketAddr, sync::Arc}; @@ -90,6 +88,11 @@ impl NetworkConfig<()> { pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { NetworkConfigBuilder::new(secret_key) } + + /// Convenience method for creating the corresponding builder type with a random secret key. + pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder { + NetworkConfigBuilder::with_rng_secret_key() + } } impl NetworkConfig { @@ -116,6 +119,16 @@ impl NetworkConfig { } } +impl NetworkConfig +where + C: BlockNumReader, +{ + /// Convenience method for calling [`NetworkManager::new`]. + pub async fn manager(self) -> Result, NetworkError> { + NetworkManager::new(self).await + } +} + impl NetworkConfig where C: BlockReader + HeaderProvider + Clone + Unpin + 'static, @@ -135,7 +148,6 @@ where /// Builder for [`NetworkConfig`](struct.NetworkConfig.html). #[derive(Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NetworkConfigBuilder { /// The node's secret key, from which the node's identity is derived. secret_key: SecretKey, @@ -144,7 +156,6 @@ pub struct NetworkConfigBuilder { /// How to set up discovery version 4. discovery_v4_builder: Option, /// How to set up discovery version 5. - #[serde(skip)] discovery_v5_builder: Option, /// All boot nodes to start network discovery with. boot_nodes: HashSet, @@ -161,19 +172,16 @@ pub struct NetworkConfigBuilder { /// The default mode of the network. network_mode: NetworkMode, /// The executor to use for spawning tasks. 
- #[serde(skip)] executor: Option>, /// Sets the hello message for the p2p handshake in `RLPx` hello_message: Option, /// The executor to use for spawning tasks. - #[serde(skip)] extra_protocols: RlpxSubProtocols, /// Head used to start set for the fork filter and status. head: Option, /// Whether tx gossip is disabled tx_gossip_disabled: bool, /// The block importer type - #[serde(skip)] block_import: Option>, /// How to instantiate transactions manager. transactions_manager_config: TransactionsManagerConfig, @@ -183,6 +191,12 @@ pub struct NetworkConfigBuilder { #[allow(missing_docs)] impl NetworkConfigBuilder { + /// Create a new builder instance with a random secret key. + pub fn with_rng_secret_key() -> Self { + Self::new(rng_secret_key()) + } + + /// Create a new builder instance with the given secret key. pub fn new(secret_key: SecretKey) -> Self { Self { secret_key, @@ -219,6 +233,11 @@ impl NetworkConfigBuilder { pk2id(&self.secret_key.public_key(SECP256K1)) } + /// Returns the configured [`SecretKey`], from which the node's identity is derived. + pub const fn secret_key(&self) -> &SecretKey { + &self.secret_key + } + /// Sets the chain spec. pub fn chain_spec(mut self, chain_spec: Arc) -> Self { self.chain_spec = chain_spec; @@ -414,36 +433,6 @@ impl NetworkConfigBuilder { } } - /// Calls a closure on [`reth_discv5::ConfigBuilder`], if discv5 discovery is enabled and the - /// builder has been set. 
- /// ``` - /// use reth_network::NetworkConfigBuilder; - /// use reth_primitives::MAINNET; - /// use reth_provider::test_utils::NoopProvider; - /// use secp256k1::{rand::thread_rng, SecretKey}; - /// - /// let sk = SecretKey::new(&mut thread_rng()); - /// let fork_id = MAINNET.latest_fork_id(); - /// let network_config = NetworkConfigBuilder::new(sk) - /// .map_discv5_config_builder(|builder| builder.fork(b"eth", fork_id)) - /// .build(NoopProvider::default()); - /// ``` - pub fn map_discv5_config_builder( - mut self, - f: impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::ConfigBuilder, - ) -> Self { - if let Some(mut builder) = self.discovery_v5_builder { - if let Some(network_stack_id) = NetworkStackId::id(&self.chain_spec) { - let fork_id = self.chain_spec.latest_fork_id(); - builder = builder.fork(network_stack_id, fork_id); - } - - self.discovery_v5_builder = Some(f(builder)); - } - - self - } - /// Adds a new additional protocol to the `RLPx` sub-protocol list. pub fn add_rlpx_sub_protocol(mut self, protocol: impl IntoRlpxSubProtocol) -> Self { self.extra_protocols.push(protocol); @@ -464,11 +453,10 @@ impl NetworkConfigBuilder { /// Convenience function for creating a [`NetworkConfig`] with a noop provider that does /// nothing. 
- #[cfg(any(test, feature = "test-utils"))] pub fn build_with_noop_provider( self, - ) -> NetworkConfig { - self.build(reth_provider::test_utils::NoopProvider::default()) + ) -> NetworkConfig { + self.build(Default::default()) } /// Consumes the type and creates the actual [`NetworkConfig`] @@ -483,7 +471,7 @@ impl NetworkConfigBuilder { secret_key, mut dns_discovery_config, discovery_v4_builder, - discovery_v5_builder, + mut discovery_v5_builder, boot_nodes, discovery_addr, listener_addr, @@ -500,6 +488,15 @@ impl NetworkConfigBuilder { transactions_manager_config, } = self; + discovery_v5_builder = discovery_v5_builder.map(|mut builder| { + if let Some(network_stack_id) = NetworkStackId::id(&chain_spec) { + let fork_id = chain_spec.latest_fork_id(); + builder = builder.fork(network_stack_id, fork_id) + } + + builder + }); + let listener_addr = listener_addr.unwrap_or(DEFAULT_DISCOVERY_ADDRESS); let mut hello_message = @@ -584,10 +581,10 @@ impl NetworkMode { mod tests { use super::*; use rand::thread_rng; + use reth_chainspec::Chain; use reth_dns_discovery::tree::LinkEntry; - use reth_primitives::{Chain, ForkHash}; + use reth_primitives::ForkHash; use reth_provider::test_utils::NoopProvider; - use std::collections::BTreeMap; fn builder() -> NetworkConfigBuilder { let secret_key = SecretKey::new(&mut thread_rng()); @@ -611,7 +608,7 @@ mod tests { let mut chain_spec = Arc::clone(&MAINNET); // remove any `next` fields we would have by removing all hardforks - Arc::make_mut(&mut chain_spec).hardforks = BTreeMap::new(); + Arc::make_mut(&mut chain_spec).hardforks = Default::default(); // check that the forkid is initialized with the genesis and no other forks let genesis_fork_hash = ForkHash::from(chain_spec.genesis_hash()); diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index ec57523ce6d8..90105751708d 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -4,6 +4,7 @@ use crate::{ 
cache::LruMap, error::{NetworkError, ServiceKind}, manager::DiscoveredEvent, + peers::PeerAddr, }; use enr::Enr; use futures::StreamExt; @@ -12,8 +13,8 @@ use reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; -use reth_network_peers::PeerId; -use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord}; +use reth_network_peers::{NodeRecord, PeerId}; +use reth_primitives::{EnrForkIdEntry, ForkId}; use secp256k1::SecretKey; use std::{ collections::VecDeque, @@ -40,7 +41,7 @@ pub struct Discovery { /// All nodes discovered via discovery protocol. /// /// These nodes can be ephemeral and are updated via the discovery protocol. - discovered_nodes: LruMap, + discovered_nodes: LruMap, /// Local ENR of the discovery v4 service (discv5 ENR has same [`PeerId`]). local_enr: NodeRecord, /// Handler to interact with the Discovery v4 service @@ -71,14 +72,17 @@ impl Discovery { /// This will spawn the [`reth_discv4::Discv4Service`] onto a new task and establish a listener /// channel to receive all discovered nodes. 
pub async fn new( + tcp_addr: SocketAddr, discovery_v4_addr: SocketAddr, sk: SecretKey, discv4_config: Option, discv5_config: Option, // contains discv5 listen address dns_discovery_config: Option, ) -> Result { - // setup discv4 - let local_enr = NodeRecord::from_secret_key(discovery_v4_addr, &sk); + // setup discv4 with the discovery address and tcp port + let local_enr = + NodeRecord::from_secret_key(discovery_v4_addr, &sk).with_tcp_port(tcp_addr.port()); + let discv4_future = async { let Some(disc_config) = discv4_config else { return Ok((None, None, None)) }; let (discv4, mut discv4_service) = @@ -201,12 +205,14 @@ impl Discovery { /// Processes an incoming [`NodeRecord`] update from a discovery service fn on_node_record_update(&mut self, record: NodeRecord, fork_id: Option) { - let id = record.id; - let addr = record.tcp_addr(); + let peer_id = record.id; + let tcp_addr = record.tcp_addr(); + let udp_addr = record.udp_addr(); + let addr = PeerAddr::new(tcp_addr, Some(udp_addr)); _ = - self.discovered_nodes.get_or_insert(id, || { + self.discovered_nodes.get_or_insert(peer_id, || { self.queued_events.push_back(DiscoveryEvent::NewNode( - DiscoveredEvent::EventQueued { peer_id: id, socket_addr: addr, fork_id }, + DiscoveredEvent::EventQueued { peer_id, addr, fork_id }, )); addr @@ -221,8 +227,8 @@ impl Discovery { DiscoveryUpdate::EnrForkId(node, fork_id) => { self.queued_events.push_back(DiscoveryEvent::EnrForkId(node.id, fork_id)) } - DiscoveryUpdate::Removed(node) => { - self.discovered_nodes.remove(&node); + DiscoveryUpdate::Removed(peer_id) => { + self.discovered_nodes.remove(&peer_id); } DiscoveryUpdate::Batch(updates) => { for update in updates { @@ -342,6 +348,7 @@ mod tests { let (secret_key, _) = SECP256K1.generate_keypair(&mut rng); let discovery_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)); let _discovery = Discovery::new( + discovery_addr, discovery_addr, secret_key, Default::default(), @@ -370,9 +377,16 @@ mod tests { 
.discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) .build(); - Discovery::new(discv4_addr, secret_key, Some(discv4_config), Some(discv5_config), None) - .await - .expect("should build discv5 with discv4 downgrade") + Discovery::new( + discv4_addr, + discv4_addr, + secret_key, + Some(discv4_config), + Some(discv5_config), + None, + ) + .await + .expect("should build discv5 with discv4 downgrade") } #[tokio::test(flavor = "multi_thread")] @@ -416,7 +430,7 @@ mod tests { assert_eq!( DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { peer_id: discv4_id_2, - socket_addr: discv4_enr_2.tcp_addr(), + addr: PeerAddr::new(discv4_enr_2.tcp_addr(), Some(discv4_enr_2.udp_addr())), fork_id: None }), event_node_1 @@ -424,7 +438,7 @@ mod tests { assert_eq!( DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { peer_id: discv4_id_1, - socket_addr: discv4_enr_1.tcp_addr(), + addr: PeerAddr::new(discv4_enr_1.tcp_addr(), Some(discv4_enr_1.udp_addr())), fork_id: None }), event_node_2 diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 8e61edfe9ee9..66f7bd5748ad 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -6,6 +6,7 @@ use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, P2PStreamError}, DisconnectReason, }; +use reth_network_types::BackoffKind; use std::{fmt, io, io::ErrorKind, net::SocketAddr}; /// Service kind. @@ -104,34 +105,6 @@ pub(crate) trait SessionError: fmt::Debug + fmt::Display { fn should_backoff(&self) -> Option; } -/// Describes the type of backoff should be applied. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum BackoffKind { - /// Use the lowest configured backoff duration. - /// - /// This applies to connection problems where there is a chance that they will be resolved - /// after the short duration. 
- Low, - /// Use a slightly higher duration to put a peer in timeout - /// - /// This applies to more severe connection problems where there is a lower chance that they - /// will be resolved. - Medium, - /// Use the max configured backoff duration. - /// - /// This is intended for spammers, or bad peers in general. - High, -} - -// === impl BackoffKind === - -impl BackoffKind { - /// Returns true if the backoff is considered severe. - pub(crate) const fn is_severe(&self) -> bool { - matches!(self, Self::Medium | Self::High) - } -} - impl SessionError for EthStreamError { fn merits_discovery_ban(&self) -> bool { match self { diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 44666b093822..2f475e38433b 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -7,13 +7,13 @@ use crate::{ use alloy_rlp::Encodable; use futures::StreamExt; use reth_eth_wire::{ - BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, NodeData, - Receipts, + BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, + HeadersDirection, NodeData, Receipts, }; use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection}; -use reth_provider::{BlockReader, HeaderProvider, ReceiptProvider}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header}; +use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; use std::{ future::Future, pin::Pin, @@ -66,8 +66,12 @@ pub struct EthRequestHandler { impl EthRequestHandler { /// Create a new instance pub fn new(client: C, peers: PeersHandle, incoming: Receiver) -> Self { - let metrics = Default::default(); - Self { client, peers, incoming_requests: ReceiverStream::new(incoming), metrics } + Self { + client, + peers, + incoming_requests: ReceiverStream::new(incoming), + metrics: Default::default(), + 
} } } @@ -124,12 +128,8 @@ where total_bytes += header.length(); headers.push(header); - if headers.len() >= MAX_HEADERS_SERVE { - break; - } - - if total_bytes > SOFT_RESPONSE_LIMIT { - break; + if headers.len() >= MAX_HEADERS_SERVE || total_bytes > SOFT_RESPONSE_LIMIT { + break } } else { break; @@ -163,22 +163,13 @@ where for hash in request.0 { if let Some(block) = self.client.block_by_hash(hash).unwrap_or_default() { - let body = BlockBody { - transactions: block.body, - ommers: block.ommers, - withdrawals: block.withdrawals, - requests: block.requests, - }; + let body: BlockBody = block.into(); total_bytes += body.length(); bodies.push(body); - if bodies.len() >= MAX_BODIES_SERVE { - break; - } - - if total_bytes > SOFT_RESPONSE_LIMIT { - break; + if bodies.len() >= MAX_BODIES_SERVE || total_bytes > SOFT_RESPONSE_LIMIT { + break } } else { break; @@ -212,12 +203,8 @@ where total_bytes += receipt.length(); receipts.push(receipt); - if receipts.len() >= MAX_RECEIPTS_SERVE { - break; - } - - if total_bytes > SOFT_RESPONSE_LIMIT { - break; + if receipts.len() >= MAX_RECEIPTS_SERVE || total_bytes > SOFT_RESPONSE_LIMIT { + break } } else { break; diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index e02778f85464..f14f6e850416 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -47,7 +47,7 @@ //! ``` //! # async fn launch() { //! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; -//! use reth_primitives::mainnet_nodes; +//! use reth_network_peers::mainnet_nodes; //! use reth_provider::test_utils::NoopProvider; //! //! // This block provider implementation is used for testing purposes. @@ -72,7 +72,7 @@ //! //! ``` //! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; -//! use reth_primitives::mainnet_nodes; +//! use reth_network_peers::mainnet_nodes; //! use reth_provider::test_utils::NoopProvider; //! use reth_transaction_pool::TransactionPool; //! 
async fn launch(pool: Pool) { @@ -143,12 +143,12 @@ pub use fetch::FetchClient; pub use manager::{NetworkEvent, NetworkManager}; pub use message::PeerRequest; pub use network::{NetworkEvents, NetworkHandle, NetworkProtocols}; -pub use peers::PeersConfig; pub use session::{ ActiveSessionHandle, ActiveSessionMessage, Direction, PeerInfo, PendingSessionEvent, PendingSessionHandle, PendingSessionHandshakeError, SessionCommand, SessionEvent, SessionId, - SessionLimits, SessionManager, SessionsConfig, + SessionManager, }; pub use transactions::{FilterAnnouncement, MessageFilter, ValidateTx68}; pub use reth_eth_wire::{DisconnectReason, HelloMessageWithProtocols}; +pub use reth_network_types::{PeersConfig, SessionsConfig}; diff --git a/crates/net/network/src/listener.rs b/crates/net/network/src/listener.rs index d0654042abec..9fcc15a104b5 100644 --- a/crates/net/network/src/listener.rs +++ b/crates/net/network/src/listener.rs @@ -1,7 +1,6 @@ //! Contains connection-oriented interfaces. use futures::{ready, Stream}; - use std::{ io, net::SocketAddr, diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 1976bb5cc8a5..6e2bf1bc281a 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -26,7 +26,7 @@ use crate::{ message::{NewBlockMessage, PeerMessage, PeerRequest, PeerRequestSender}, metrics::{DisconnectMetrics, NetworkMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, network::{NetworkHandle, NetworkHandleMessage}, - peers::{PeersHandle, PeersManager}, + peers::{PeerAddr, PeersHandle, PeersManager}, poll_nested_stream_with_budget, protocol::IntoRlpxSubProtocol, session::SessionManager, @@ -41,17 +41,18 @@ use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, DisconnectReason, EthVersion, Status, }; +use reth_fs_util::{self as fs, FsPathError}; use reth_metrics::common::mpsc::UnboundedMeteredSender; -use reth_network_api::ReputationChangeKind; -use reth_network_peers::PeerId; -use 
reth_primitives::{ForkId, NodeRecord}; -use reth_provider::{BlockNumReader, BlockReader}; -use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; +use reth_network_api::{EthProtocolInfo, NetworkStatus, PeerInfo, ReputationChangeKind}; +use reth_network_peers::{NodeRecord, PeerId}; +use reth_primitives::ForkId; +use reth_storage_api::BlockNumReader; use reth_tasks::shutdown::GracefulShutdown; use reth_tokio_util::EventSender; use secp256k1::SecretKey; use std::{ net::SocketAddr, + path::Path, pin::Pin, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, @@ -197,7 +198,9 @@ where let incoming = ConnectionListener::bind(listener_addr).await.map_err(|err| { NetworkError::from_io_error(err, ServiceKind::Listener(listener_addr)) })?; - let listener_address = Arc::new(Mutex::new(incoming.local_address())); + + // retrieve the tcp address of the socket + let listener_addr = incoming.local_address(); // resolve boot nodes let mut resolved_boot_nodes = vec![]; @@ -214,6 +217,7 @@ where }); let discovery = Discovery::new( + listener_addr, discovery_v4_addr, secret_key, discovery_v4_config, @@ -248,7 +252,7 @@ where let handle = NetworkHandle::new( Arc::clone(&num_active_peers), - listener_address, + Arc::new(Mutex::new(listener_addr)), to_manager_tx, secret_key, local_peer_id, @@ -279,7 +283,7 @@ where /// /// ``` /// use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; - /// use reth_primitives::mainnet_nodes; + /// use reth_network_peers::mainnet_nodes; /// use reth_provider::test_utils::NoopProvider; /// use reth_transaction_pool::TransactionPool; /// async fn launch(pool: Pool) { @@ -314,7 +318,7 @@ where NetworkBuilder { network: self, transactions: (), request_handler: () } } - /// Returns the [`SocketAddr`] that listens for incoming connections. + /// Returns the [`SocketAddr`] that listens for incoming tcp connections. 
pub const fn local_addr(&self) -> SocketAddr { self.swarm.listener().local_address() } @@ -334,6 +338,11 @@ where self.swarm.state().peers().iter_peers() } + /// Returns the number of peers in the peer set. + pub fn num_known_peers(&self) -> usize { + self.swarm.state().peers().num_known_peers() + } + /// Returns a new [`PeersHandle`] that can be cloned and shared. /// /// The [`PeersHandle`] can be used to interact with the network's peer set. @@ -341,6 +350,18 @@ where self.swarm.state().peers().handle() } + /// Collect the peers from the [`NetworkManager`] and write them to the given + /// `persistent_peers_file`. + pub fn write_peers_to_file(&self, persistent_peers_file: &Path) -> Result<(), FsPathError> { + let known_peers = self.all_peers().collect::>(); + let known_peers = serde_json::to_string_pretty(&known_peers).map_err(|e| { + FsPathError::WriteJson { source: e, path: persistent_peers_file.to_path_buf() } + })?; + persistent_peers_file.parent().map(fs::create_dir_all).transpose()?; + fs::write(persistent_peers_file, known_peers)?; + Ok(()) + } + /// Returns a new [`FetchClient`] that can be cloned and shared. /// /// The [`FetchClient`] is the entrypoint for sending requests to the network. 
@@ -560,7 +581,7 @@ where } } NetworkHandleMessage::RemovePeer(peer_id, kind) => { - self.swarm.state_mut().remove_peer(peer_id, kind); + self.swarm.state_mut().remove_peer_kind(peer_id, kind); } NetworkHandleMessage::DisconnectPeer(peer_id, reason) => { self.swarm.sessions_mut().disconnect(peer_id, reason); @@ -602,17 +623,17 @@ where } } NetworkHandleMessage::GetPeerInfos(tx) => { - let _ = tx.send(self.swarm.sessions_mut().get_peer_info()); + let _ = tx.send(self.get_peer_infos()); } NetworkHandleMessage::GetPeerInfoById(peer_id, tx) => { - let _ = tx.send(self.swarm.sessions_mut().get_peer_info_by_id(peer_id)); + let _ = tx.send(self.get_peer_info_by_id(peer_id)); } NetworkHandleMessage::GetPeerInfosByIds(peer_ids, tx) => { - let _ = tx.send(self.swarm.sessions().get_peer_infos_by_ids(peer_ids)); + let _ = tx.send(self.get_peer_infos_by_ids(peer_ids)); } NetworkHandleMessage::GetPeerInfosByPeerKind(kind, tx) => { - let peers = self.swarm.state().peers().peers_by_kind(kind); - let _ = tx.send(self.swarm.sessions().get_peer_infos_by_ids(peers)); + let peer_ids = self.swarm.state().peers().peers_by_kind(kind); + let _ = tx.send(self.get_peer_infos_by_ids(peer_ids)); } NetworkHandleMessage::AddRlpxSubProtocol(proto) => self.add_rlpx_sub_protocol(proto), NetworkHandleMessage::GetTransactionsHandle(tx) => { @@ -863,6 +884,42 @@ where } } + /// Returns [`PeerInfo`] for all connected peers + fn get_peer_infos(&self) -> Vec { + self.swarm + .sessions() + .active_sessions() + .iter() + .filter_map(|(&peer_id, session)| { + self.swarm + .state() + .peers() + .peer_by_id(peer_id) + .map(|(record, kind)| session.peer_info(&record, kind)) + }) + .collect() + } + + /// Returns [`PeerInfo`] for a given peer. + /// + /// Returns `None` if there's no active session to the peer. 
+ fn get_peer_info_by_id(&self, peer_id: PeerId) -> Option { + self.swarm.sessions().active_sessions().get(&peer_id).and_then(|session| { + self.swarm + .state() + .peers() + .peer_by_id(peer_id) + .map(|(record, kind)| session.peer_info(&record, kind)) + }) + } + + /// Returns [`PeerInfo`] for a given peers. + /// + /// Ignore the non-active peer. + fn get_peer_infos_by_ids(&self, peer_ids: impl IntoIterator) -> Vec { + peer_ids.into_iter().filter_map(|peer_id| self.get_peer_info_by_id(peer_id)).collect() + } + /// Updates the metrics for active,established connections #[inline] fn update_active_connection_metrics(&self) { @@ -888,7 +945,7 @@ where impl NetworkManager where - C: BlockReader + Unpin, + C: BlockNumReader + Unpin, { /// Drives the [`NetworkManager`] future until a [`GracefulShutdown`] signal is received. /// @@ -917,7 +974,7 @@ where impl Future for NetworkManager where - C: BlockReader + Unpin, + C: BlockNumReader + Unpin, { type Output = (); @@ -1028,7 +1085,7 @@ pub enum NetworkEvent { #[derive(Debug, Clone, PartialEq, Eq)] pub enum DiscoveredEvent { - EventQueued { peer_id: PeerId, socket_addr: SocketAddr, fork_id: Option }, + EventQueued { peer_id: PeerId, addr: PeerAddr, fork_id: Option }, } #[derive(Debug, Default)] diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 0729a17aab08..3fc8eb897a50 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,20 +1,25 @@ use crate::{ - config::NetworkMode, discovery::DiscoveryEvent, manager::NetworkEvent, message::PeerRequest, - peers::PeersHandle, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, - transactions::TransactionsHandle, FetchClient, + config::NetworkMode, + discovery::DiscoveryEvent, + manager::NetworkEvent, + message::PeerRequest, + peers::{PeerAddr, PeersHandle}, + protocol::RlpxSubProtocol, + swarm::NetworkConnectionState, + transactions::TransactionsHandle, + FetchClient, }; use enr::Enr; use 
parking_lot::Mutex; use reth_discv4::Discv4; use reth_eth_wire::{DisconnectReason, NewBlock, NewPooledTransactionHashes, SharedTransactions}; use reth_network_api::{ - NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, + NetworkError, NetworkInfo, NetworkStatus, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; -use reth_network_peers::PeerId; -use reth_primitives::{Head, NodeRecord, TransactionSigned, B256}; -use reth_rpc_types::NetworkStatus; +use reth_network_peers::{NodeRecord, PeerId}; +use reth_primitives::{Head, TransactionSigned, B256}; use reth_tokio_util::{EventSender, EventStream}; use secp256k1::SecretKey; use std::{ @@ -258,7 +263,14 @@ impl Peers for NetworkHandle { /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to add a peer to the known /// set, with the given kind. - fn add_peer_kind(&self, peer: PeerId, kind: PeerKind, addr: SocketAddr) { + fn add_peer_kind( + &self, + peer: PeerId, + kind: PeerKind, + tcp_addr: SocketAddr, + udp_addr: Option, + ) { + let addr = PeerAddr::new(tcp_addr, udp_addr); self.send_message(NetworkHandleMessage::AddPeerAddress(peer, kind, addr)); } @@ -421,7 +433,7 @@ pub(crate) enum NetworkHandleMessage { /// Marks a peer as trusted. AddTrustedPeerId(PeerId), /// Adds an address for a peer, including its ID, kind, and socket address. - AddPeerAddress(PeerId, PeerKind, SocketAddr), + AddPeerAddress(PeerId, PeerKind, PeerAddr), /// Removes a peer from the peerset corresponding to the given kind. RemovePeer(PeerId, PeerKind), /// Disconnects a connection to a peer if it exists, optionally providing a disconnect reason. 
diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers.rs similarity index 84% rename from crates/net/network/src/peers/manager.rs rename to crates/net/network/src/peers.rs index b1befae48927..8d5ad9dfd2b5 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers.rs @@ -1,27 +1,30 @@ +//! Peer related implementations + use crate::{ - error::{BackoffKind, SessionError}, - peers::{ - reputation::{ - is_banned_reputation, DEFAULT_REPUTATION, MAX_TRUSTED_PEER_REPUTATION_CHANGE, - }, - ReputationChangeWeights, DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS, - DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND, - }, + error::SessionError, session::{Direction, PendingSessionHandshakeError}, swarm::NetworkConnectionState, }; use futures::StreamExt; use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; -use reth_net_common::ban_list::BanList; +use reth_net_banlist::BanList; use reth_network_api::{PeerKind, ReputationChangeKind}; -use reth_network_peers::PeerId; -use reth_primitives::{ForkId, NodeRecord}; +use reth_network_peers::{NodeRecord, PeerId}; +use reth_network_types::{ + peers::{ + config::PeerBackoffDurations, + reputation::{ + is_banned_reputation, DEFAULT_REPUTATION, MAX_TRUSTED_PEER_REPUTATION_CHANGE, + }, + }, + ConnectionsConfig, PeersConfig, ReputationChangeWeights, +}; +use reth_primitives::ForkId; use std::{ collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, fmt::Display, - io::{self, ErrorKind}, + io::{self}, net::{IpAddr, SocketAddr}, - path::Path, task::{Context, Poll}, time::Duration, }; @@ -31,7 +34,7 @@ use tokio::{ time::{Instant, Interval}, }; use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::{info, trace}; +use tracing::trace; /// A communication channel to the [`PeersManager`] to apply manual changes to the peer set. 
#[derive(Clone, Debug)] @@ -152,13 +155,17 @@ impl PeersManager { let mut peers = HashMap::with_capacity(trusted_nodes.len() + basic_nodes.len()); let mut trusted_peer_ids = HashSet::with_capacity(trusted_nodes.len()); - for NodeRecord { address, tcp_port, udp_port: _, id } in trusted_nodes { + for NodeRecord { address, tcp_port, udp_port, id } in trusted_nodes { trusted_peer_ids.insert(id); - peers.entry(id).or_insert_with(|| Peer::trusted(SocketAddr::from((address, tcp_port)))); + peers.entry(id).or_insert_with(|| { + Peer::trusted(PeerAddr::new_with_ports(address, tcp_port, Some(udp_port))) + }); } - for NodeRecord { address, tcp_port, udp_port: _, id } in basic_nodes { - peers.entry(id).or_insert_with(|| Peer::new(SocketAddr::from((address, tcp_port)))); + for NodeRecord { address, tcp_port, udp_port, id } in basic_nodes { + peers.entry(id).or_insert_with(|| { + Peer::new(PeerAddr::new_with_ports(address, tcp_port, Some(udp_port))) + }); } Self { @@ -170,7 +177,7 @@ impl PeersManager { reputation_weights, refill_slots_interval: tokio::time::interval(refill_slots_interval), release_interval: tokio::time::interval_at(now + unban_interval, unban_interval), - connection_info, + connection_info: ConnectionInfo::new(connection_info), ban_list, backed_off_peers: Default::default(), ban_duration, @@ -195,7 +202,29 @@ impl PeersManager { /// Returns an iterator over all peers pub(crate) fn iter_peers(&self) -> impl Iterator + '_ { - self.peers.iter().map(|(peer_id, v)| NodeRecord::new(v.addr, *peer_id)) + self.peers.iter().map(|(peer_id, v)| { + NodeRecord::new_with_ports( + v.addr.tcp.ip(), + v.addr.tcp.port(), + v.addr.udp.map(|addr| addr.port()), + *peer_id, + ) + }) + } + + /// Returns the `NodeRecord` and `PeerKind` for the given peer id + pub(crate) fn peer_by_id(&self, peer_id: PeerId) -> Option<(NodeRecord, PeerKind)> { + self.peers.get(&peer_id).map(|v| { + ( + NodeRecord::new_with_ports( + v.addr.tcp.ip(), + v.addr.tcp.port(), + v.addr.udp.map(|addr| 
addr.port()), + peer_id, + ), + v.kind, + ) + }) } /// Returns an iterator over all peer ids for peers with the given kind @@ -238,9 +267,7 @@ impl PeersManager { return Err(InboundConnectionError::IpBanned); } - if (!self.connection_info.has_in_capacity() || self.connection_info.max_inbound == 0) && - self.trusted_peer_ids.is_empty() - { + if !self.connection_info.has_in_capacity() && self.trusted_peer_ids.is_empty() { // if we don't have any inbound slots and no trusted peers, we don't accept any new // connections return Err(InboundConnectionError::ExceedsCapacity); @@ -324,33 +351,24 @@ impl PeersManager { peer.state = PeerConnectionState::In; is_trusted = is_trusted || peer.is_trusted(); - - // if a peer is not trusted and we don't have capacity for more inbound connections, - // disconnecting the peer - if !is_trusted && !has_in_capacity { - self.queued_actions.push_back(PeerAction::Disconnect { - peer_id, - reason: Some(DisconnectReason::TooManyPeers), - }); - } } Entry::Vacant(entry) => { // peer is missing in the table, we add it but mark it as to be removed after // disconnect, because we only know the outgoing port - let mut peer = Peer::with_state(addr, PeerConnectionState::In); + let mut peer = Peer::with_state(PeerAddr::tcp(addr), PeerConnectionState::In); peer.remove_after_disconnect = true; entry.insert(peer); self.queued_actions.push_back(PeerAction::PeerAdded(peer_id)); - - // disconnect the peer if we don't have capacity for more inbound connections - if !is_trusted && !has_in_capacity { - self.queued_actions.push_back(PeerAction::Disconnect { - peer_id, - reason: Some(DisconnectReason::TooManyPeers), - }); - } } } + + // disconnect the peer if we don't have capacity for more inbound connections + if !is_trusted && !has_in_capacity { + self.queued_actions.push_back(PeerAction::Disconnect { + peer_id, + reason: Some(DisconnectReason::TooManyPeers), + }); + } } /// Bans the peer temporarily with the configured ban timeout @@ -662,7 +680,7 @@ impl 
PeersManager { /// Called for a newly discovered peer. /// /// If the peer already exists, then the address, kind and `fork_id` will be updated. - pub(crate) fn add_peer(&mut self, peer_id: PeerId, addr: SocketAddr, fork_id: Option) { + pub(crate) fn add_peer(&mut self, peer_id: PeerId, addr: PeerAddr, fork_id: Option) { self.add_peer_kind(peer_id, PeerKind::Basic, addr, fork_id) } @@ -675,7 +693,7 @@ impl PeersManager { /// /// If the peer already exists, then the address and kind will be updated. #[allow(dead_code)] - pub(crate) fn add_trusted_peer(&mut self, peer_id: PeerId, addr: SocketAddr) { + pub(crate) fn add_trusted_peer(&mut self, peer_id: PeerId, addr: PeerAddr) { self.add_peer_kind(peer_id, PeerKind::Trusted, addr, None) } @@ -686,11 +704,11 @@ impl PeersManager { &mut self, peer_id: PeerId, kind: PeerKind, - addr: SocketAddr, + addr: PeerAddr, fork_id: Option, ) { - if self.ban_list.is_banned(&peer_id, &addr.ip()) { - return; + if self.ban_list.is_banned(&peer_id, &addr.tcp.ip()) { + return } match self.peers.entry(peer_id) { @@ -708,7 +726,7 @@ impl PeersManager { } } Entry::Vacant(entry) => { - trace!(target: "net::peers", ?peer_id, ?addr, "discovered new node"); + trace!(target: "net::peers", ?peer_id, ?addr.tcp, "discovered new node"); let mut peer = Peer::with_kind(addr, kind); peer.fork_id = fork_id; entry.insert(peer); @@ -812,7 +830,7 @@ impl PeersManager { return; } - // as long as there a slots available fill them with the best peers + // as long as there are slots available fill them with the best peers while self.connection_info.has_out_capacity() { let action = { let (peer_id, peer) = match self.best_unconnected() { @@ -823,7 +841,7 @@ impl PeersManager { trace!(target: "net::peers", ?peer_id, addr=?peer.addr, "schedule outbound connection"); peer.state = PeerConnectionState::PendingOut; - PeerAction::Connect { peer_id, remote_addr: peer.addr } + PeerAction::Connect { peer_id, remote_addr: peer.addr.tcp } }; 
self.connection_info.inc_pending_out(); @@ -861,7 +879,7 @@ impl PeersManager { while let Poll::Ready(Some(cmd)) = self.handle_rx.poll_next_unpin(cx) { match cmd { PeerCommand::Add(peer_id, addr) => { - self.add_peer(peer_id, addr, None); + self.add_peer(peer_id, PeerAddr::tcp(addr), None); } PeerCommand::Remove(peer) => self.remove_peer(peer), PeerCommand::ReputationChange(peer_id, rep) => { @@ -918,42 +936,37 @@ impl Default for PeersManager { } /// Tracks stats about connected nodes -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize), serde(default))] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct ConnectionInfo { /// Counter for currently occupied slots for active outbound connections. - #[cfg_attr(feature = "serde", serde(skip))] num_outbound: usize, /// Counter for pending outbound connections. - #[cfg_attr(feature = "serde", serde(skip))] num_pending_out: usize, /// Counter for currently occupied slots for active inbound connections. - #[cfg_attr(feature = "serde", serde(skip))] num_inbound: usize, /// Counter for pending inbound connections. - #[cfg_attr(feature = "serde", serde(skip))] num_pending_in: usize, - /// Maximum allowed outbound connections. - max_outbound: usize, - /// Maximum allowed inbound connections. - max_inbound: usize, - /// Maximum allowed concurrent outbound dials. - #[cfg_attr(feature = "serde", serde(default))] - max_concurrent_outbound_dials: usize, + /// Restrictions on number of connections. + config: ConnectionsConfig, } // === impl ConnectionInfo === impl ConnectionInfo { + /// Returns a new [`ConnectionInfo`] with the given config. + const fn new(config: ConnectionsConfig) -> Self { + Self { config, num_outbound: 0, num_pending_out: 0, num_inbound: 0, num_pending_in: 0 } + } + /// Returns `true` if there's still capacity for a new outgoing connection. 
const fn has_out_capacity(&self) -> bool { - self.num_pending_out < self.max_concurrent_outbound_dials && - self.num_outbound < self.max_outbound + self.num_pending_out < self.config.max_concurrent_outbound_dials && + self.num_outbound < self.config.max_outbound } /// Returns `true` if there's still capacity for a new incoming connection. const fn has_in_capacity(&self) -> bool { - self.num_inbound < self.max_inbound + self.num_inbound < self.config.max_inbound } fn decr_state(&mut self, state: PeerConnectionState) { @@ -998,25 +1011,43 @@ impl ConnectionInfo { } } -impl Default for ConnectionInfo { - fn default() -> Self { - Self { - num_outbound: 0, - num_inbound: 0, - max_outbound: DEFAULT_MAX_COUNT_PEERS_OUTBOUND as usize, - max_inbound: DEFAULT_MAX_COUNT_PEERS_INBOUND as usize, - max_concurrent_outbound_dials: DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS, - num_pending_out: 0, - num_pending_in: 0, - } +/// Represents a peer's address information. +/// +/// # Fields +/// +/// - `tcp`: A `SocketAddr` representing the peer's data transfer address. +/// - `udp`: An optional `SocketAddr` representing the peer's discover address. `None` if the peer +/// is directly connecting to us or the port is the same to `tcp`'s +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct PeerAddr { + tcp: SocketAddr, + udp: Option, +} + +impl PeerAddr { + /// Returns a new `PeerAddr` with the given `tcp` and `udp` addresses. + pub const fn new(tcp: SocketAddr, udp: Option) -> Self { + Self { tcp, udp } + } + + /// Returns a new `PeerAddr` with a `tcp` address only. + pub const fn tcp(tcp: SocketAddr) -> Self { + Self { tcp, udp: None } + } + + /// Returns a new `PeerAddr` with the given `tcp` and `udp` ports. + fn new_with_ports(ip: IpAddr, tcp_port: u16, udp_port: Option) -> Self { + let tcp = SocketAddr::new(ip, tcp_port); + let udp = udp_port.map(|port| SocketAddr::new(ip, port)); + Self::new(tcp, udp) } } /// Tracks info about a single peer. 
#[derive(Debug, Clone)] pub struct Peer { - /// Where to reach the peer - addr: SocketAddr, + /// Where to reach the peer. + addr: PeerAddr, /// Reputation of the peer. reputation: i32, /// The state of the connection, if any. @@ -1029,18 +1060,19 @@ pub struct Peer { kind: PeerKind, /// Whether the peer is currently backed off. backed_off: bool, - /// Counts number of times the peer was backed off due to a severe [`BackoffKind`]. + /// Counts number of times the peer was backed off due to a severe + /// [`reth_network_types::BackoffKind`]. severe_backoff_counter: u8, } // === impl Peer === impl Peer { - fn new(addr: SocketAddr) -> Self { + fn new(addr: PeerAddr) -> Self { Self::with_state(addr, Default::default()) } - fn trusted(addr: SocketAddr) -> Self { + fn trusted(addr: PeerAddr) -> Self { Self { kind: PeerKind::Trusted, ..Self::new(addr) } } @@ -1049,7 +1081,7 @@ impl Peer { self.reputation } - fn with_state(addr: SocketAddr, state: PeerConnectionState) -> Self { + fn with_state(addr: PeerAddr, state: PeerConnectionState) -> Self { Self { addr, state, @@ -1062,7 +1094,7 @@ impl Peer { } } - fn with_kind(addr: SocketAddr, kind: PeerKind) -> Self { + fn with_kind(addr: PeerAddr, kind: PeerKind) -> Self { Self { kind, ..Self::new(addr) } } @@ -1263,265 +1295,6 @@ pub enum PeerAction { PeerRemoved(PeerId), } -/// Config type for initiating a [`PeersManager`] instance. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "serde", serde(default))] -pub struct PeersConfig { - /// How often to recheck free slots for outbound connections. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub refill_slots_interval: Duration, - /// Trusted nodes to connect to or accept from - pub trusted_nodes: HashSet, - /// Connect to or accept from trusted nodes only? 
- #[cfg_attr(feature = "serde", serde(alias = "connect_trusted_nodes_only"))] - pub trusted_nodes_only: bool, - /// Maximum number of backoff attempts before we give up on a peer and dropping. - /// - /// The max time spent of a peer before it's removed from the set is determined by the - /// configured backoff duration and the max backoff count. - /// - /// With a backoff counter of 5 and a backoff duration of 1h, the minimum time spent of the - /// peer in the table is the sum of all backoffs (1h + 2h + 3h + 4h + 5h = 15h). - /// - /// Note: this does not apply to trusted peers. - pub max_backoff_count: u8, - /// Basic nodes to connect to. - #[cfg_attr(feature = "serde", serde(skip))] - pub basic_nodes: HashSet, - /// How long to ban bad peers. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub ban_duration: Duration, - /// Restrictions on `PeerIds` and Ips. - #[cfg_attr(feature = "serde", serde(skip))] - pub ban_list: BanList, - /// Restrictions on connections. - pub connection_info: ConnectionInfo, - /// How to weigh reputation changes. - pub reputation_weights: ReputationChangeWeights, - /// How long to backoff peers that are we failed to connect to for non-fatal reasons, such as - /// [`DisconnectReason::TooManyPeers`]. - /// - /// The backoff duration increases with number of backoff attempts. 
- pub backoff_durations: PeerBackoffDurations, -} - -impl Default for PeersConfig { - fn default() -> Self { - Self { - refill_slots_interval: Duration::from_millis(5_000), - connection_info: Default::default(), - reputation_weights: Default::default(), - ban_list: Default::default(), - // Ban peers for 12h - ban_duration: Duration::from_secs(60 * 60 * 12), - backoff_durations: Default::default(), - trusted_nodes: Default::default(), - trusted_nodes_only: false, - basic_nodes: Default::default(), - max_backoff_count: 5, - } - } -} - -impl PeersConfig { - /// A set of `peer_ids` and ip addr that we want to never connect to - pub fn with_ban_list(mut self, ban_list: BanList) -> Self { - self.ban_list = ban_list; - self - } - - /// Configure how long to ban bad peers - pub const fn with_ban_duration(mut self, ban_duration: Duration) -> Self { - self.ban_duration = ban_duration; - self - } - - /// Maximum occupied slots for outbound connections. - pub const fn with_max_pending_outbound(mut self, num_outbound: usize) -> Self { - self.connection_info.num_outbound = num_outbound; - self - } - - /// Maximum occupied slots for inbound connections. - pub const fn with_max_pending_inbound(mut self, num_inbound: usize) -> Self { - self.connection_info.num_inbound = num_inbound; - self - } - - /// Maximum allowed outbound connections. - pub const fn with_max_outbound(mut self, max_outbound: usize) -> Self { - self.connection_info.max_outbound = max_outbound; - self - } - - /// Maximum allowed inbound connections with optional update. - pub const fn with_max_inbound_opt(mut self, max_inbound: Option) -> Self { - if let Some(max_inbound) = max_inbound { - self.connection_info.max_inbound = max_inbound; - } - self - } - - /// Maximum allowed outbound connections with optional update. 
- pub const fn with_max_outbound_opt(mut self, max_outbound: Option) -> Self { - if let Some(max_outbound) = max_outbound { - self.connection_info.max_outbound = max_outbound; - } - self - } - - /// Maximum allowed inbound connections. - pub const fn with_max_inbound(mut self, max_inbound: usize) -> Self { - self.connection_info.max_inbound = max_inbound; - self - } - - /// Maximum allowed concurrent outbound dials. - pub const fn with_max_concurrent_dials(mut self, max_concurrent_outbound_dials: usize) -> Self { - self.connection_info.max_concurrent_outbound_dials = max_concurrent_outbound_dials; - self - } - - /// Nodes to always connect to. - pub fn with_trusted_nodes(mut self, nodes: HashSet) -> Self { - self.trusted_nodes = nodes; - self - } - - /// Connect only to trusted nodes. - pub const fn with_trusted_nodes_only(mut self, trusted_only: bool) -> Self { - self.trusted_nodes_only = trusted_only; - self - } - - /// Nodes available at launch. - pub fn with_basic_nodes(mut self, nodes: HashSet) -> Self { - self.basic_nodes = nodes; - self - } - - /// Configures the max allowed backoff count. - pub const fn with_max_backoff_count(mut self, max_backoff_count: u8) -> Self { - self.max_backoff_count = max_backoff_count; - self - } - - /// Configures how to weigh reputation changes. - pub const fn with_reputation_weights( - mut self, - reputation_weights: ReputationChangeWeights, - ) -> Self { - self.reputation_weights = reputation_weights; - self - } - - /// Configures how long to backoff peers that are we failed to connect to for non-fatal reasons - pub const fn with_backoff_durations(mut self, backoff_durations: PeerBackoffDurations) -> Self { - self.backoff_durations = backoff_durations; - self - } - - /// Returns the maximum number of peers, inbound and outbound. - pub const fn max_peers(&self) -> usize { - self.connection_info.max_outbound + self.connection_info.max_inbound - } - - /// Read from file nodes available at launch. Ignored if None. 
- pub fn with_basic_nodes_from_file( - self, - optional_file: Option>, - ) -> Result { - let Some(file_path) = optional_file else { return Ok(self) }; - let reader = match std::fs::File::open(file_path.as_ref()) { - Ok(file) => io::BufReader::new(file), - Err(e) if e.kind() == ErrorKind::NotFound => return Ok(self), - Err(e) => Err(e)?, - }; - info!(target: "net::peers", file = %file_path.as_ref().display(), "Loading saved peers"); - let nodes: HashSet = serde_json::from_reader(reader)?; - Ok(self.with_basic_nodes(nodes)) - } - - /// Returns settings for testing - #[cfg(test)] - fn test() -> Self { - Self { - refill_slots_interval: Duration::from_millis(100), - backoff_durations: PeerBackoffDurations::test(), - ..Default::default() - } - } -} - -/// The durations to use when a backoff should be applied to a peer. -/// -/// See also [`BackoffKind`]. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct PeerBackoffDurations { - /// Applies to connection problems where there is a chance that they will be resolved after the - /// short duration. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub low: Duration, - /// Applies to more severe connection problems where there is a lower chance that they will be - /// resolved. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub medium: Duration, - /// Intended for spammers, or bad peers in general. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub high: Duration, - /// Maximum total backoff duration. 
- #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub max: Duration, -} - -impl PeerBackoffDurations { - /// Returns the corresponding [`Duration`] - pub const fn backoff(&self, kind: BackoffKind) -> Duration { - match kind { - BackoffKind::Low => self.low, - BackoffKind::Medium => self.medium, - BackoffKind::High => self.high, - } - } - - /// Returns the timestamp until which we should backoff. - /// - /// The Backoff duration is capped by the configured maximum backoff duration. - pub fn backoff_until(&self, kind: BackoffKind, backoff_counter: u8) -> std::time::Instant { - let backoff_time = self.backoff(kind); - let backoff_time = backoff_time + backoff_time * backoff_counter as u32; - let now = std::time::Instant::now(); - now + backoff_time.min(self.max) - } - - /// Returns durations for testing. - #[cfg(test)] - const fn test() -> Self { - Self { - low: Duration::from_millis(200), - medium: Duration::from_millis(200), - high: Duration::from_millis(200), - max: Duration::from_millis(200), - } - } -} - -impl Default for PeerBackoffDurations { - fn default() -> Self { - Self { - low: Duration::from_secs(30), - // 3min - medium: Duration::from_secs(60 * 3), - // 15min - high: Duration::from_secs(60 * 15), - // 1h - max: Duration::from_secs(60 * 60), - } - } -} - /// Error thrown when a incoming connection is rejected right away #[derive(Debug, Error, PartialEq, Eq)] pub enum InboundConnectionError { @@ -1541,11 +1314,9 @@ impl Display for InboundConnectionError { mod tests { use super::PeersManager; use crate::{ - error::BackoffKind, peers::{ - manager::{ConnectionInfo, PeerBackoffDurations, PeerConnectionState}, - reputation::DEFAULT_REPUTATION, - InboundConnectionError, PeerAction, + ConnectionInfo, InboundConnectionError, PeerAction, PeerAddr, PeerBackoffDurations, + PeerConnectionState, }, session::PendingSessionHandshakeError, PeersConfig, @@ -1555,9 +1326,10 @@ mod tests { errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, 
P2PStreamError}, DisconnectReason, }; - use reth_net_common::ban_list::BanList; + use reth_net_banlist::BanList; use reth_network_api::{Direction, ReputationChangeKind}; use reth_network_peers::PeerId; + use reth_network_types::{peers::reputation::DEFAULT_REPUTATION, BackoffKind}; use reth_primitives::B512; use std::{ collections::HashSet, @@ -1592,7 +1364,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -1607,6 +1379,37 @@ mod tests { } _ => unreachable!(), } + + let (record, _) = peers.peer_by_id(peer).unwrap(); + assert_eq!(record.tcp_addr(), socket_addr); + assert_eq!(record.udp_addr(), socket_addr); + } + + #[tokio::test] + async fn test_insert_udp() { + let peer = PeerId::random(); + let tcp_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); + let udp_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8009); + let mut peers = PeersManager::default(); + peers.add_peer(peer, PeerAddr::new(tcp_addr, Some(udp_addr)), None); + + match event!(peers) { + PeerAction::PeerAdded(peer_id) => { + assert_eq!(peer_id, peer); + } + _ => unreachable!(), + } + match event!(peers) { + PeerAction::Connect { peer_id, remote_addr } => { + assert_eq!(peer_id, peer); + assert_eq!(remote_addr, tcp_addr); + } + _ => unreachable!(), + } + + let (record, _) = peers.peer_by_id(peer).unwrap(); + assert_eq!(record.tcp_addr(), tcp_addr); + assert_eq!(record.udp_addr(), udp_addr); } #[tokio::test] @@ -1615,7 +1418,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); peers.ban_peer(peer); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match 
event!(peers) { PeerAction::BanPeer { peer_id } => { @@ -1637,7 +1440,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); peers.ban_peer(peer); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::BanPeer { peer_id } => { @@ -1674,7 +1477,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::new(PeersConfig::test()); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -1733,7 +1536,7 @@ mod tests { let backoff_durations = PeerBackoffDurations::test(); let config = PeersConfig { backoff_durations, ..PeersConfig::test() }; let mut peers = PeersManager::new(config); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -1790,7 +1593,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let config = PeersConfig::test(); let mut peers = PeersManager::new(config); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); let peer_struct = peers.peers.get_mut(&peer).unwrap(); let backoff_timestamp = peers @@ -1807,7 +1610,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let config = PeersConfig::default(); let mut peers = PeersManager::new(config); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); let peer_struct = peers.peers.get_mut(&peer).unwrap(); // Simulate a peer that was already backed off once @@ -1835,7 +1638,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 
2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -1892,7 +1695,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let config = PeersConfig::test(); let mut peers = PeersManager::new(config.clone()); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); let peer_struct = peers.peers.get_mut(&peer).unwrap(); // Simulate a peer that was already backed off once @@ -1946,7 +1749,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2058,7 +1861,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2106,7 +1909,7 @@ mod tests { peers.add_trusted_peer_id(trusted); // saturate the inbound slots - for i in 0..peers.connection_info.max_inbound { + for i in 0..peers.connection_info.config.max_inbound { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, i as u8)), 8008); assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_ok()); let peer_id = PeerId::random(); @@ -2176,7 +1979,7 @@ mod tests { // to increase by 1 peers.on_incoming_session_established(peer, socket_addr); let p = peers.peers.get_mut(&peer).expect("peer not found"); - assert_eq!(p.addr, socket_addr); + assert_eq!(p.addr.tcp, socket_addr); assert_eq!(peers.connection_info.num_pending_in, 
0); assert_eq!(peers.connection_info.num_inbound, 1); @@ -2191,7 +1994,7 @@ mod tests { peers.on_already_connected(Direction::Incoming); let p = peers.peers.get_mut(&peer).expect("peer not found"); - assert_eq!(p.addr, socket_addr); + assert_eq!(p.addr.tcp, socket_addr); assert_eq!(peers.connection_info.num_pending_in, 0); assert_eq!(peers.connection_info.num_inbound, 1); } @@ -2201,7 +2004,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_trusted_peer(peer, socket_addr); + peers.add_trusted_peer(peer, PeerAddr::tcp(socket_addr)); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2253,7 +2056,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); assert_eq!(peers.get_reputation(&peer), Some(0)); peers.apply_reputation_change(&peer, ReputationChangeKind::Other(1024)); @@ -2268,7 +2071,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2305,7 +2108,7 @@ mod tests { let p = peers.peers.get(&peer).unwrap(); assert_eq!(p.state, PeerConnectionState::PendingOut); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); let p = peers.peers.get(&peer).unwrap(); assert_eq!(p.state, PeerConnectionState::PendingOut); @@ -2318,7 +2121,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - 
peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2353,7 +2156,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2387,7 +2190,7 @@ mod tests { let ban_list = BanList::new(HashSet::new(), vec![ip]); let config = PeersConfig::default().with_ban_list(ban_list); let mut peer_manager = PeersManager::new(config); - peer_manager.add_peer(B512::default(), socket_addr, None); + peer_manager.add_peer(B512::default(), PeerAddr::tcp(socket_addr), None); assert!(peer_manager.peers.is_empty()); } @@ -2404,7 +2207,7 @@ mod tests { match a { Ok(_) => panic!(), Err(err) => match err { - super::InboundConnectionError::IpBanned {} => { + InboundConnectionError::IpBanned {} => { assert_eq!(peer_manager.connection_info.num_pending_in, 0) } _ => unreachable!(), @@ -2490,7 +2293,7 @@ mod tests { let basic_peer = PeerId::random(); let basic_sock = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8009); - peers.add_peer(basic_peer, basic_sock, None); + peers.add_peer(basic_peer, PeerAddr::tcp(basic_sock), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2530,7 +2333,7 @@ mod tests { let basic_peer = PeerId::random(); let basic_sock = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8009); - peers.add_peer(basic_peer, basic_sock, None); + peers.add_peer(basic_peer, PeerAddr::tcp(basic_sock), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2638,7 +2441,7 @@ mod tests { let config = PeersConfig::test(); let mut peer_manager = PeersManager::new(config); let peer_id = PeerId::random(); - peer_manager.add_peer(peer_id, socket_addr, None); + 
peer_manager.add_peer(peer_id, PeerAddr::tcp(socket_addr), None); tokio::time::sleep(Duration::from_secs(1)).await; peer_manager.tick(); @@ -2693,7 +2496,7 @@ mod tests { assert!(peer.remove_after_disconnect); // trigger discovery manually while the peer is still connected - peers.add_peer(peer_id, addr, None); + peers.add_peer(peer_id, PeerAddr::tcp(addr), None); peers.on_active_session_gracefully_closed(peer_id); @@ -2709,7 +2512,7 @@ mod tests { let mut peers = PeersManager::default(); peers.on_incoming_pending_session(addr.ip()).unwrap(); - peers.add_peer(peer_id, addr, None); + peers.add_peer(peer_id, PeerAddr::tcp(addr), None); match event!(peers) { PeerAction::PeerAdded(_) => {} @@ -2737,7 +2540,7 @@ mod tests { let mut peers = PeersManager::default(); peers.on_incoming_pending_session(addr.ip()).unwrap(); - peers.add_peer(peer_id, addr, None); + peers.add_peer(peer_id, PeerAddr::tcp(addr), None); match event!(peers) { PeerAction::PeerAdded(_) => {} @@ -2768,9 +2571,9 @@ mod tests { let config = PeersConfig::default(); let mut peer_manager = PeersManager::new(config); let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); - let socket_addr = SocketAddr::new(ip, 8008); - for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials * 2 { - peer_manager.add_peer(PeerId::random(), socket_addr, None); + let peer_addr = PeerAddr::tcp(SocketAddr::new(ip, 8008)); + for _ in 0..peer_manager.connection_info.config.max_concurrent_outbound_dials * 2 { + peer_manager.add_peer(PeerId::random(), peer_addr, None); } peer_manager.fill_outbound_slots(); @@ -2779,7 +2582,7 @@ mod tests { .iter() .filter(|ev| matches!(ev, PeerAction::Connect { .. 
})) .count(); - assert_eq!(dials, peer_manager.connection_info.max_concurrent_outbound_dials); + assert_eq!(dials, peer_manager.connection_info.config.max_concurrent_outbound_dials); } #[tokio::test] @@ -2787,21 +2590,21 @@ mod tests { let config = PeersConfig::default(); let mut peer_manager = PeersManager::new(config); let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); - let socket_addr = SocketAddr::new(ip, 8008); + let peer_addr = PeerAddr::tcp(SocketAddr::new(ip, 8008)); // add more peers than allowed - for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials * 2 { - peer_manager.add_peer(PeerId::random(), socket_addr, None); + for _ in 0..peer_manager.connection_info.config.max_concurrent_outbound_dials * 2 { + peer_manager.add_peer(PeerId::random(), peer_addr, None); } - for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials * 2 { + for _ in 0..peer_manager.connection_info.config.max_concurrent_outbound_dials * 2 { match event!(peer_manager) { PeerAction::PeerAdded(_) => {} _ => unreachable!(), } } - for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials { + for _ in 0..peer_manager.connection_info.config.max_concurrent_outbound_dials { match event!(peer_manager) { PeerAction::Connect { .. 
} => {} _ => unreachable!(), @@ -2813,7 +2616,7 @@ mod tests { // all dialed connections should be in 'PendingOut' state let dials = peer_manager.connection_info.num_pending_out; - assert_eq!(dials, peer_manager.connection_info.max_concurrent_outbound_dials); + assert_eq!(dials, peer_manager.connection_info.config.max_concurrent_outbound_dials); let num_pendingout_states = peer_manager .peers @@ -2823,7 +2626,7 @@ mod tests { .collect::>(); assert_eq!( num_pendingout_states.len(), - peer_manager.connection_info.max_concurrent_outbound_dials + peer_manager.connection_info.config.max_concurrent_outbound_dials ); // establish dialed connections diff --git a/crates/net/network/src/peers/mod.rs b/crates/net/network/src/peers/mod.rs deleted file mode 100644 index fafb2d7622e7..000000000000 --- a/crates/net/network/src/peers/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Peer related implementations - -mod manager; -mod reputation; - -pub(crate) use manager::InboundConnectionError; -pub use manager::{ConnectionInfo, Peer, PeerAction, PeersConfig, PeersHandle, PeersManager}; -pub use reputation::ReputationChangeWeights; -pub use reth_network_api::PeerKind; - -/// Maximum number of available slots for outbound sessions. -pub const DEFAULT_MAX_COUNT_PEERS_OUTBOUND: u32 = 100; - -/// Maximum number of available slots for inbound sessions. -pub const DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30; - -/// Maximum number of available slots for concurrent outgoing dials. -/// -/// This restricts how many outbound dials can be performed concurrently. 
-pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15; diff --git a/crates/net/network/src/protocol.rs b/crates/net/network/src/protocol.rs index 7be1c48a6a3a..2ae1b132df3f 100644 --- a/crates/net/network/src/protocol.rs +++ b/crates/net/network/src/protocol.rs @@ -6,9 +6,8 @@ use futures::Stream; use reth_eth_wire::{ capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol, }; -use reth_network_api::Direction; +use reth_network_api::{Direction, PeerId}; use reth_primitives::BytesMut; -use reth_rpc_types::PeerId; use std::{ fmt, net::SocketAddr, diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index abbf77919c52..03ff50414831 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -3,14 +3,12 @@ use crate::{ message::{NewBlockMessage, PeerMessage, PeerRequest, PeerResponse, PeerResponseResult}, session::{ - config::INITIAL_REQUEST_TIMEOUT, conn::EthRlpxConnection, handle::{ActiveSessionMessage, SessionCommand}, SessionId, }, }; use core::sync::atomic::Ordering; -use fnv::FnvHashMap; use futures::{stream::Fuse, SinkExt, StreamExt}; use reth_eth_wire::{ capability::Capabilities, @@ -21,6 +19,8 @@ use reth_eth_wire::{ use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_p2p::error::RequestError; use reth_network_peers::PeerId; +use reth_network_types::session::config::INITIAL_REQUEST_TIMEOUT; +use rustc_hash::FxHashMap; use std::{ collections::VecDeque, future::Future, @@ -81,7 +81,7 @@ pub(crate) struct ActiveSession { /// Incoming internal requests which are delegated to the remote peer. 
pub(crate) internal_request_tx: Fuse>, /// All requests sent to the remote peer we're waiting on a response - pub(crate) inflight_requests: FnvHashMap, + pub(crate) inflight_requests: FxHashMap, /// All requests that were sent by the remote peer and we're waiting on an internal response pub(crate) received_requests_from_remote: Vec, /// Buffered messages that should be handled and sent to the peer. @@ -759,17 +759,16 @@ fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> #[cfg(test)] mod tests { use super::*; - use crate::session::{ - config::PROTOCOL_BREACH_REQUEST_TIMEOUT, handle::PendingSessionEvent, - start_pending_incoming_session, - }; + use crate::session::{handle::PendingSessionEvent, start_pending_incoming_session}; + use reth_chainspec::MAINNET; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream, }; use reth_network_peers::pk2id; - use reth_primitives::{ForkFilter, Hardfork, MAINNET}; + use reth_network_types::session::config::PROTOCOL_BREACH_REQUEST_TIMEOUT; + use reth_primitives::{EthereumHardfork, ForkFilter}; use secp256k1::{SecretKey, SECP256K1}; use tokio::{ net::{TcpListener, TcpStream}, @@ -919,7 +918,7 @@ mod tests { local_peer_id, status: StatusBuilder::default().build(), fork_filter: MAINNET - .hardfork_fork_filter(Hardfork::Frontier) + .hardfork_fork_filter(EthereumHardfork::Frontier) .expect("The Frontier fork filter should exist on mainnet"), } } diff --git a/crates/net/network/src/session/counter.rs b/crates/net/network/src/session/counter.rs new file mode 100644 index 000000000000..0d8f764f206d --- /dev/null +++ b/crates/net/network/src/session/counter.rs @@ -0,0 +1,106 @@ +use reth_network_api::Direction; +use reth_network_types::SessionLimits; + +use super::ExceedsSessionLimit; + +/// Keeps track of all sessions. 
+#[derive(Debug, Clone)] +pub struct SessionCounter { + /// Limits to enforce. + limits: SessionLimits, + /// Number of pending incoming sessions. + pending_inbound: u32, + /// Number of pending outgoing sessions. + pending_outbound: u32, + /// Number of active inbound sessions. + active_inbound: u32, + /// Number of active outbound sessions. + active_outbound: u32, +} + +// === impl SessionCounter === + +impl SessionCounter { + pub(crate) const fn new(limits: SessionLimits) -> Self { + Self { + limits, + pending_inbound: 0, + pending_outbound: 0, + active_inbound: 0, + active_outbound: 0, + } + } + + pub(crate) fn inc_pending_inbound(&mut self) { + self.pending_inbound += 1; + } + + pub(crate) fn inc_pending_outbound(&mut self) { + self.pending_outbound += 1; + } + + pub(crate) fn dec_pending(&mut self, direction: &Direction) { + match direction { + Direction::Outgoing(_) => { + self.pending_outbound -= 1; + } + Direction::Incoming => { + self.pending_inbound -= 1; + } + } + } + + pub(crate) fn inc_active(&mut self, direction: &Direction) { + match direction { + Direction::Outgoing(_) => { + self.active_outbound += 1; + } + Direction::Incoming => { + self.active_inbound += 1; + } + } + } + + pub(crate) fn dec_active(&mut self, direction: &Direction) { + match direction { + Direction::Outgoing(_) => { + self.active_outbound -= 1; + } + Direction::Incoming => { + self.active_inbound -= 1; + } + } + } + + pub(crate) const fn ensure_pending_outbound(&self) -> Result<(), ExceedsSessionLimit> { + Self::ensure(self.pending_outbound, self.limits.max_pending_outbound) + } + + pub(crate) const fn ensure_pending_inbound(&self) -> Result<(), ExceedsSessionLimit> { + Self::ensure(self.pending_inbound, self.limits.max_pending_inbound) + } + + const fn ensure(current: u32, limit: Option) -> Result<(), ExceedsSessionLimit> { + if let Some(limit) = limit { + if current >= limit { + return Err(ExceedsSessionLimit(limit)) + } + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use 
super::*; + + #[test] + fn test_limits() { + let mut limits = SessionCounter::new(SessionLimits::default().with_max_pending_inbound(2)); + assert!(limits.ensure_pending_outbound().is_ok()); + limits.inc_pending_inbound(); + assert!(limits.ensure_pending_inbound().is_ok()); + limits.inc_pending_inbound(); + assert!(limits.ensure_pending_inbound().is_err()); + } +} diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index b28b1e27e390..4c1a5e5315ac 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -11,8 +11,8 @@ use reth_eth_wire::{ errors::EthStreamError, DisconnectReason, EthVersion, Status, }; -use reth_network_api::PeerInfo; -use reth_network_peers::PeerId; +use reth_network_api::{PeerInfo, PeerKind}; +use reth_network_peers::{NodeRecord, PeerId}; use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, @@ -136,10 +136,12 @@ impl ActiveSessionHandle { } /// Extracts the [`PeerInfo`] from the session handle. - pub(crate) fn peer_info(&self) -> PeerInfo { + pub(crate) fn peer_info(&self, record: &NodeRecord, kind: PeerKind) -> PeerInfo { PeerInfo { remote_id: self.remote_id, direction: self.direction, + enode: record.to_string(), + enr: None, remote_addr: self.remote_addr, local_addr: self.local_addr, capabilities: self.capabilities.clone(), @@ -147,6 +149,7 @@ impl ActiveSessionHandle { eth_version: self.version, status: self.status.clone(), session_established: self.established, + kind, } } } diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 060b669b3cf5..a8f1f2e1dd4d 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -1,11 +1,7 @@ //! Support for handling peer sessions. 
-use crate::{ - message::PeerMessage, - metrics::SessionManagerMetrics, - session::{active::ActiveSession, config::SessionCounter}, -}; -use fnv::FnvHashMap; +use crate::{message::PeerMessage, metrics::SessionManagerMetrics, session::active::ActiveSession}; +use counter::SessionCounter; use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; use reth_eth_wire::{ @@ -15,10 +11,11 @@ use reth_eth_wire::{ UnauthedP2PStream, }; use reth_metrics::common::mpsc::MeteredPollSender; -use reth_net_common::stream::HasRemoteAddr; use reth_network_peers::PeerId; +use reth_network_types::SessionsConfig; use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; use reth_tasks::TaskSpawner; +use rustc_hash::FxHashMap; use secp256k1::SecretKey; use std::{ collections::HashMap, @@ -38,12 +35,11 @@ use tokio_util::sync::PollSender; use tracing::{debug, instrument, trace}; mod active; -mod config; mod conn; +mod counter; mod handle; pub use crate::message::PeerRequestSender; use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}; -pub use config::{SessionLimits, SessionsConfig}; pub use handle::{ ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, SessionCommand, @@ -87,7 +83,7 @@ pub struct SessionManager { /// /// Events produced during the authentication phase are reported to this manager. Once the /// session is authenticated, it can be moved to the `active_session` set. - pending_sessions: FnvHashMap, + pending_sessions: FxHashMap, /// All active sessions that are ready to exchange messages. active_sessions: HashMap, /// The original Sender half of the [`PendingSessionEvent`] channel. @@ -174,6 +170,11 @@ impl SessionManager { self.secret_key } + /// Returns a borrowed reference to the active sessions. + pub const fn active_sessions(&self) -> &HashMap { + &self.active_sessions + } + /// Returns the session hello message. 
pub fn hello_message(&self) -> HelloMessageWithProtocols { self.hello_message.clone() @@ -591,35 +592,6 @@ impl SessionManager { } } } - - /// Returns [`PeerInfo`] for all connected peers - pub(crate) fn get_peer_info(&self) -> Vec { - self.active_sessions.values().map(ActiveSessionHandle::peer_info).collect() - } - - /// Returns [`PeerInfo`] for a given peer. - /// - /// Returns `None` if there's no active session to the peer. - pub(crate) fn get_peer_info_by_id(&self, peer_id: PeerId) -> Option { - self.active_sessions.get(&peer_id).map(ActiveSessionHandle::peer_info) - } - /// Returns [`PeerInfo`] for a given peer. - /// - /// Returns `None` if there's no active session to the peer. - pub(crate) fn get_peer_infos_by_ids( - &self, - peer_ids: impl IntoIterator, - ) -> Vec { - let mut infos = Vec::new(); - for peer_id in peer_ids { - if let Some(info) = - self.active_sessions.get(&peer_id).map(ActiveSessionHandle::peer_info) - { - infos.push(info); - } - } - infos - } } /// Events produced by the [`SessionManager`] @@ -927,7 +899,7 @@ async fn authenticate( /// Returns an [`ECIESStream`] if it can be built. 
If not, send a /// [`PendingSessionEvent::EciesAuthError`] and returns `None` -async fn get_eciess_stream( +async fn get_eciess_stream( stream: Io, secret_key: SecretKey, direction: Direction, @@ -1007,10 +979,7 @@ async fn authenticate_stream( (eth_stream.into(), their_status) } else { // Multiplex the stream with the extra protocols - let (mut multiplex_stream, their_status) = RlpxProtocolMultiplexer::new(p2p_stream) - .into_eth_satellite_stream(status, fork_filter) - .await - .unwrap(); + let mut multiplex_stream = RlpxProtocolMultiplexer::new(p2p_stream); // install additional handlers for handler in extra_handlers.into_iter() { @@ -1023,6 +992,19 @@ async fn authenticate_stream( .ok(); } + let (multiplex_stream, their_status) = + match multiplex_stream.into_eth_satellite_stream(status, fork_filter).await { + Ok((multiplex_stream, their_status)) => (multiplex_stream, their_status), + Err(err) => { + return PendingSessionEvent::Disconnected { + remote_addr, + session_id, + direction, + error: Some(PendingSessionHandshakeError::Eth(err)), + } + } + }; + (multiplex_stream.into(), their_status) }; diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 9a7b597eb59a..1087e781439c 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -9,7 +9,7 @@ use crate::{ BlockRequest, NewBlockMessage, PeerRequest, PeerRequestSender, PeerResponse, PeerResponseResult, }, - peers::{PeerAction, PeersManager}, + peers::{PeerAction, PeerAddr, PeersManager}, FetchClient, }; use rand::seq::SliceRandom; @@ -20,7 +20,7 @@ use reth_eth_wire::{ use reth_network_api::PeerKind; use reth_network_peers::PeerId; use reth_primitives::{ForkId, B256}; -use reth_provider::BlockNumReader; +use reth_storage_api::BlockNumReader; use std::{ collections::{HashMap, VecDeque}, net::{IpAddr, SocketAddr}, @@ -274,13 +274,14 @@ where } /// Adds a peer and its address with the given kind to the peerset. 
- pub(crate) fn add_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind, addr: SocketAddr) { + pub(crate) fn add_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind, addr: PeerAddr) { self.peers_manager.add_peer_kind(peer_id, kind, addr, None) } - pub(crate) fn remove_peer(&mut self, peer_id: PeerId, kind: PeerKind) { + /// Removes a peer and its address with the given kind from the peerset. + pub(crate) fn remove_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind) { match kind { - PeerKind::Basic => self.peers_manager.remove_peer(peer_id), + PeerKind::Basic | PeerKind::Static => self.peers_manager.remove_peer(peer_id), PeerKind::Trusted => self.peers_manager.remove_peer_from_trusted_set(peer_id), } } @@ -288,14 +289,10 @@ where /// Event hook for events received from the discovery service. fn on_discovery_event(&mut self, event: DiscoveryEvent) { match event { - DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { - peer_id, - socket_addr, - fork_id, - }) => { + DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { peer_id, addr, fork_id }) => { self.queued_messages.push_back(StateAction::DiscoveredNode { peer_id, - socket_addr, + addr, fork_id, }); } @@ -516,7 +513,7 @@ pub(crate) enum StateAction { fork_id: ForkId, }, /// A new node was found through the discovery, possibly with a `ForkId` - DiscoveredNode { peer_id: PeerId, socket_addr: SocketAddr, fork_id: Option }, + DiscoveredNode { peer_id: PeerId, addr: PeerAddr, fork_id: Option }, /// A peer was added PeerAdded(PeerId), /// A peer was dropped diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index ec1ca5e28a01..3c4d1419f80f 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -13,7 +13,7 @@ use reth_eth_wire::{ EthVersion, Status, }; use reth_network_peers::PeerId; -use reth_provider::{BlockNumReader, BlockReader}; +use reth_storage_api::BlockNumReader; use std::{ io, net::SocketAddr, @@ -247,14 +247,14 @@ where } 
StateAction::PeerAdded(peer_id) => return Some(SwarmEvent::PeerAdded(peer_id)), StateAction::PeerRemoved(peer_id) => return Some(SwarmEvent::PeerRemoved(peer_id)), - StateAction::DiscoveredNode { peer_id, socket_addr, fork_id } => { + StateAction::DiscoveredNode { peer_id, addr, fork_id } => { // Don't try to connect to peer if node is shutting down if self.is_shutting_down() { return None; } // Insert peer only if no fork id or a valid fork id if fork_id.map_or_else(|| true, |f| self.sessions.is_valid_fork_id(f)) { - self.state_mut().peers_mut().add_peer(peer_id, socket_addr, fork_id); + self.state_mut().peers_mut().add_peer(peer_id, addr, fork_id); } } StateAction::DiscoveredEnrForkId { peer_id, fork_id } => { @@ -287,7 +287,7 @@ where impl Stream for Swarm where - C: BlockReader + Unpin, + C: BlockNumReader + Unpin, { type Item = SwarmEvent; diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 6e1e6c46d542..3555360d281c 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -12,13 +12,12 @@ use crate::{ }; use futures::{FutureExt, StreamExt}; use pin_project::pin_project; +use reth_chainspec::MAINNET; use reth_eth_wire::{protocol::Protocol, DisconnectReason, HelloMessageWithProtocols}; use reth_network_api::{NetworkInfo, Peers}; use reth_network_peers::PeerId; -use reth_primitives::MAINNET; -use reth_provider::{ - test_utils::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, -}; +use reth_provider::test_utils::NoopProvider; +use reth_storage_api::{BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory}; use reth_tasks::TokioTaskExecutor; use reth_tokio_util::EventStream; use reth_transaction_pool::{ diff --git a/crates/net/network/src/transactions/constants.rs b/crates/net/network/src/transactions/constants.rs index 59ec103cdace..48fb8857cc35 100644 --- a/crates/net/network/src/transactions/constants.rs 
+++ b/crates/net/network/src/transactions/constants.rs @@ -57,9 +57,9 @@ pub mod tx_manager { /// Constants used by [`TransactionFetcher`](super::TransactionFetcher). pub mod tx_fetcher { - use crate::{ - peers::{DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND}, - transactions::fetcher::TransactionFetcherInfo, + use crate::transactions::fetcher::TransactionFetcherInfo; + use reth_network_types::peers::config::{ + DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND, }; use super::{ diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index e00247f1f6e1..04d9b60238b4 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -1152,7 +1152,7 @@ pub struct GetPooledTxRequestFut { impl GetPooledTxRequestFut { #[inline] - fn new( + const fn new( peer_id: PeerId, requested_hashes: RequestTxHashes, response: oneshot::Receiver>, diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index ffb30a4d7994..b379a67044c6 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -4,8 +4,8 @@ use alloy_node_bindings::Geth; use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; use reth_discv4::Discv4Config; -use reth_eth_wire::DisconnectReason; -use reth_net_common::ban_list::BanList; +use reth_eth_wire::{DisconnectReason, HeadersDirection}; +use reth_net_banlist::BanList; use reth_network::{ test_utils::{enr_to_peer_id, NetworkEventStream, PeerConfig, Testnet, GETH_TIMEOUT}, NetworkConfigBuilder, NetworkEvent, NetworkEvents, NetworkManager, PeersConfig, @@ -15,7 +15,7 @@ use reth_network_p2p::{ headers::client::{HeadersClient, HeadersRequest}, sync::{NetworkSyncUpdater, SyncState}, }; -use reth_primitives::{mainnet_nodes, HeadersDirection, NodeRecord}; +use reth_network_peers::{mainnet_nodes, NodeRecord}; use 
reth_provider::test_utils::NoopProvider; use reth_transaction_pool::test_utils::testing_pool; use secp256k1::SecretKey; diff --git a/crates/net/network/tests/it/multiplex.rs b/crates/net/network/tests/it/multiplex.rs index 471afc562b3f..800dd370c6c6 100644 --- a/crates/net/network/tests/it/multiplex.rs +++ b/crates/net/network/tests/it/multiplex.rs @@ -10,10 +10,9 @@ use reth_network::{ protocol::{ConnectionHandler, OnNotSupported, ProtocolHandler}, test_utils::Testnet, }; -use reth_network_api::Direction; +use reth_network_api::{Direction, PeerId}; use reth_primitives::BytesMut; use reth_provider::test_utils::MockEthProvider; -use reth_rpc_types::PeerId; use std::{ net::SocketAddr, pin::Pin, diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 555acd08b248..85669dc9f8a0 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -2,6 +2,7 @@ //! Tests for eth related requests use rand::Rng; +use reth_eth_wire::HeadersDirection; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, NetworkEvents, @@ -12,8 +13,8 @@ use reth_network_p2p::{ headers::client::{HeadersClient, HeadersRequest}, }; use reth_primitives::{ - Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionSigned, - TxEip2930, TxKind, U256, + Block, BlockBody, Bytes, Header, Signature, Transaction, TransactionSigned, TxEip2930, TxKind, + U256, }; use reth_provider::test_utils::MockEthProvider; use std::sync::Arc; diff --git a/crates/net/network/tests/it/startup.rs b/crates/net/network/tests/it/startup.rs index 0bef94e08c30..9317bef0fd4a 100644 --- a/crates/net/network/tests/it/startup.rs +++ b/crates/net/network/tests/it/startup.rs @@ -3,7 +3,7 @@ use reth_network::{ error::{NetworkError, ServiceKind}, Discovery, NetworkConfigBuilder, NetworkManager, }; -use reth_network_api::NetworkInfo; +use reth_network_api::{NetworkInfo, PeersInfo}; use 
reth_provider::test_utils::NoopProvider; use secp256k1::SecretKey; use std::{ @@ -59,8 +59,47 @@ async fn test_discovery_addr_in_use() { let any_port_listener = TcpListener::bind(addr).await.unwrap(); let port = any_port_listener.local_addr().unwrap().port(); let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port)); - let _discovery = Discovery::new(addr, secret_key, Some(disc_config), None, None).await.unwrap(); + let _discovery = + Discovery::new(addr, addr, secret_key, Some(disc_config), None, None).await.unwrap(); let disc_config = Discv4Config::default(); - let result = Discovery::new(addr, secret_key, Some(disc_config), None, None).await; + let result = Discovery::new(addr, addr, secret_key, Some(disc_config), None, None).await; assert!(is_addr_in_use_kind(&result.err().unwrap(), ServiceKind::Discovery(addr))); } + +// +#[tokio::test(flavor = "multi_thread")] +async fn test_tcp_port_node_record_no_discovery() { + let secret_key = SecretKey::new(&mut rand::thread_rng()); + let config = NetworkConfigBuilder::new(secret_key) + .listener_port(0) + .disable_discovery() + .build_with_noop_provider(); + let network = NetworkManager::new(config).await.unwrap(); + + let local_addr = network.local_addr(); + // ensure we retrieved the port the OS chose + assert_ne!(local_addr.port(), 0); + + let record = network.handle().local_node_record(); + assert_eq!(record.tcp_port, local_addr.port()); +} + +// +#[tokio::test(flavor = "multi_thread")] +async fn test_tcp_port_node_record_discovery() { + let secret_key = SecretKey::new(&mut rand::thread_rng()); + let config = NetworkConfigBuilder::new(secret_key) + .listener_port(0) + .discovery_port(0) + .disable_dns_discovery() + .build_with_noop_provider(); + let network = NetworkManager::new(config).await.unwrap(); + + let local_addr = network.local_addr(); + // ensure we retrieved the port the OS chose + assert_ne!(local_addr.port(), 0); + + let record = network.handle().local_node_record(); + 
assert_eq!(record.tcp_port, local_addr.port()); + assert_ne!(record.udp_port, 0); +} diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 6e3893ba6152..707b45e24b9f 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -4,12 +4,10 @@ use crate::{ error::PeerRequestResult, headers::client::{HeadersClient, SingleHeaderRequest}, }; -use futures::Stream; use reth_consensus::{Consensus, ConsensusError}; +use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; -use reth_primitives::{ - BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, B256, -}; +use reth_primitives::{BlockBody, GotExpected, Header, SealedBlock, SealedHeader, B256}; use std::{ cmp::Reverse, collections::{HashMap, VecDeque}, @@ -635,69 +633,6 @@ where } } -/// A type that buffers the result of a range request so we can return it as a `Stream`. -struct FullBlockRangeStream -where - Client: BodiesClient + HeadersClient, -{ - /// The inner [`FetchFullBlockRangeFuture`] that is polled. - inner: FetchFullBlockRangeFuture, - /// The blocks that have been received so far. - /// - /// If this is `None` then the request is still in progress. If the vec is empty, then all of - /// the response values have been consumed. - blocks: Option>, -} - -impl From> for FullBlockRangeStream -where - Client: BodiesClient + HeadersClient, -{ - fn from(inner: FetchFullBlockRangeFuture) -> Self { - Self { inner, blocks: None } - } -} - -impl Stream for FullBlockRangeStream -where - Client: BodiesClient + HeadersClient + Unpin + 'static, -{ - type Item = SealedBlock; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - // If all blocks have been consumed, then return `None`. 
- if let Some(blocks) = &mut this.blocks { - if blocks.is_empty() { - // Stream is finished - return Poll::Ready(None); - } - - // return the next block if it's ready - the vec should be in ascending order since it - // is reversed right after it is received from the future, so we can just pop() the - // elements to return them from the stream in descending order - return Poll::Ready(blocks.pop()); - } - - // poll the inner future if the blocks are not yet ready - let mut blocks = ready!(Pin::new(&mut this.inner).poll(cx)); - - // the blocks are returned in descending order, reverse the list so we can just pop() the - // vec to yield the next block in the stream - blocks.reverse(); - - // pop the first block from the vec as the first stream element and store the rest - let first_result = blocks.pop(); - - // if the inner future is ready, then we can return the blocks - this.blocks = Some(blocks); - - // return the first block - Poll::Ready(first_result) - } -} - /// A request for a range of full blocks. Polling this will poll the inner headers and bodies /// futures until they return responses. It will return either the header or body result, depending /// on which future successfully returned. 
@@ -743,7 +678,6 @@ enum RangeResponseResult { mod tests { use super::*; use crate::test_utils::TestFullBlockClient; - use futures::StreamExt; use std::ops::Range; #[tokio::test] @@ -809,43 +743,6 @@ mod tests { } } - #[tokio::test] - async fn download_full_block_range_stream() { - let client = TestFullBlockClient::default(); - let (header, body) = insert_headers_into_client(&client, 0..50); - let client = FullBlockClient::test_client(client); - - let future = client.get_full_block_range(header.hash(), 1); - let mut stream = FullBlockRangeStream::from(future); - - // ensure only block in the stream is the one we requested - let received = stream.next().await.expect("response should not be None"); - assert_eq!(received, SealedBlock::new(header.clone(), body.clone())); - - // stream should be done now - assert_eq!(stream.next().await, None); - - // there are 11 total blocks - let future = client.get_full_block_range(header.hash(), 11); - let mut stream = FullBlockRangeStream::from(future); - - // check first header - let received = stream.next().await.expect("response should not be None"); - let mut curr_number = received.number; - assert_eq!(received, SealedBlock::new(header.clone(), body.clone())); - - // check the rest of the headers - for _ in 0..10 { - let received = stream.next().await.expect("response should not be None"); - assert_eq!(received.number, curr_number - 1); - curr_number = received.number; - } - - // ensure stream is done - let received = stream.next().await; - assert!(received.is_none()); - } - #[tokio::test] async fn download_full_block_range_over_soft_limit() { // default soft limit is 20, so we will request 50 blocks diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index 5b70aa1e5282..4a4b903a8261 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -1,7 +1,7 @@ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use 
futures::{Future, FutureExt}; -pub use reth_eth_wire_types::BlockHeaders; -use reth_primitives::{BlockHashOrNumber, Header, HeadersDirection}; +pub use reth_eth_wire_types::{BlockHeaders, HeadersDirection}; +use reth_primitives::{BlockHashOrNumber, Header}; use std::{ fmt::Debug, pin::Pin, diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index cfba59dbf745..731aa39e7e91 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -6,10 +6,10 @@ use crate::{ priority::Priority, }; use parking_lot::Mutex; +use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives::{ - BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, SealedBlock, - SealedHeader, B256, + BlockBody, BlockHashOrNumber, BlockNumHash, Header, SealedBlock, SealedHeader, B256, }; use std::{collections::HashMap, sync::Arc}; diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index 9cb8fe632cea..af4d7dfb285e 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -12,8 +12,9 @@ use crate::{ }; use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{Header, HeadersDirection, SealedHeader}; +use reth_primitives::{Header, SealedHeader}; use std::{ fmt, pin::Pin, @@ -38,7 +39,7 @@ pub struct TestHeaderDownloader { impl TestHeaderDownloader { /// Instantiates the downloader with the mock responses - pub fn new( + pub const fn new( client: TestHeadersClient, consensus: Arc, limit: u64, diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml index 854ca3fbd5c8..5ac24edea759 100644 --- a/crates/net/peers/Cargo.toml +++ b/crates/net/peers/Cargo.toml @@ -25,13 +25,15 @@ 
secp256k1 = { workspace = true, optional = true } serde_with.workspace = true thiserror.workspace = true url.workspace = true -tokio = { workspace = true, features = ["full"] } +tokio = { workspace = true, optional = true } [dev-dependencies] alloy-primitives = { workspace = true, features = ["rand"] } rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } serde_json.workspace = true +tokio = { workspace = true, features = ["net", "macros", "rt"] } [features] -secp256k1 = ["dep:secp256k1"] +secp256k1 = ["dep:secp256k1", "enr/secp256k1"] +net = ["dep:tokio", "tokio?/net"] diff --git a/crates/net/peers/src/bootnodes/ethereum.rs b/crates/net/peers/src/bootnodes/ethereum.rs new file mode 100644 index 000000000000..9cb6aac00e1f --- /dev/null +++ b/crates/net/peers/src/bootnodes/ethereum.rs @@ -0,0 +1,24 @@ +//! Ethereum bootnodes come from + +/// Ethereum Foundation Go Bootnodes +pub static MAINNET_BOOTNODES : [&str; 4] = [ + "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", // bootnode-aws-ap-southeast-1-001 + "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", // bootnode-aws-us-east-1-001 + "enode://2b252ab6a1d0f971d9722cb839a42cb81db019ba44c08754628ab4a823487071b5695317c8ccd085219c3a03af063495b2f1da8d18218da2d6a82981b45e6ffc@65.108.70.101:30303", // bootnode-hetzner-hel + "enode://4aeb4ab6c14b23e2c4cfdce879c04b0748a20d8e9b59e25ded2a08143e265c6c25936e74cbc8e641e3312ca288673d91f2f93f8e277de3cfa444ecdaaf982052@157.90.35.166:30303", // bootnode-hetzner-fsn +]; + +/// Ethereum Foundation Sepolia Bootnodes +pub static SEPOLIA_BOOTNODES : [&str; 5] = [ + "enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", // sepolia-bootnode-1-nyc3 + 
"enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", // sepolia-bootnode-1-sfo3 + "enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", // sepolia-bootnode-1-syd1 + "enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", // sepolia-bootnode-1-blr1 + "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3 +]; + +/// Ethereum Foundation Holesky Bootnodes +pub static HOLESKY_BOOTNODES : [&str; 2] = [ + "enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303", + "enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", +]; diff --git a/crates/net/peers/src/bootnodes/mod.rs b/crates/net/peers/src/bootnodes/mod.rs new file mode 100644 index 000000000000..31c91e5d1cea --- /dev/null +++ b/crates/net/peers/src/bootnodes/mod.rs @@ -0,0 +1,49 @@ +//! 
Bootnodes for the network + +use crate::NodeRecord; + +mod ethereum; +pub use ethereum::*; + +mod optimism; +pub use optimism::*; + +/// Returns parsed mainnet nodes +pub fn mainnet_nodes() -> Vec { + parse_nodes(&MAINNET_BOOTNODES[..]) +} + +/// Returns parsed sepolia nodes +pub fn sepolia_nodes() -> Vec { + parse_nodes(&SEPOLIA_BOOTNODES[..]) +} + +/// Returns parsed holesky nodes +pub fn holesky_nodes() -> Vec { + parse_nodes(&HOLESKY_BOOTNODES[..]) +} + +/// Returns parsed op-stack mainnet nodes +pub fn op_nodes() -> Vec { + parse_nodes(OP_BOOTNODES) +} + +/// Returns parsed op-stack testnet nodes +pub fn op_testnet_nodes() -> Vec { + parse_nodes(OP_TESTNET_BOOTNODES) +} + +/// Returns parsed op-stack base mainnet nodes +pub fn base_nodes() -> Vec { + parse_nodes(OP_BOOTNODES) +} + +/// Returns parsed op-stack base testnet nodes +pub fn base_testnet_nodes() -> Vec { + parse_nodes(OP_TESTNET_BOOTNODES) +} + +/// Parses all the nodes +pub fn parse_nodes(nodes: impl IntoIterator>) -> Vec { + nodes.into_iter().map(|s| s.as_ref().parse().unwrap()).collect() +} diff --git a/crates/net/peers/src/bootnodes/optimism.rs b/crates/net/peers/src/bootnodes/optimism.rs new file mode 100644 index 000000000000..e3465721b1ca --- /dev/null +++ b/crates/net/peers/src/bootnodes/optimism.rs @@ -0,0 +1,26 @@ +//! OP bootnodes come from + +/// OP stack mainnet boot nodes. 
+pub static OP_BOOTNODES: &[&str] = &[ + // OP Labs + "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", + "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", + "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", + // Base + "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", + "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", + "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", + "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", + "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301" +]; + +/// OP stack testnet boot nodes. 
+pub static OP_TESTNET_BOOTNODES: &[&str] = &[ + // OP Labs + "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", + "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", + "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", + // Base + "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", + "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", +]; diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs index 1da12cd4a3a1..c36b7c564f39 100644 --- a/crates/net/peers/src/lib.rs +++ b/crates/net/peers/src/lib.rs @@ -39,6 +39,11 @@ //! - [`TrustedPeer`]: A [`NodeRecord`] with an optional domain name, which can be resolved to a //! [`NodeRecord`]. Useful for adding trusted peers at startup, whose IP address may not be //! static. +//! +//! +//! ## Feature Flags +//! +//! - `net`: Support for address lookups. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -63,6 +68,9 @@ pub use node_record::{NodeRecord, NodeRecordParseError}; pub mod trusted_peer; pub use trusted_peer::TrustedPeer; +mod bootnodes; +pub use bootnodes::*; + /// This tag should be set to indicate to libsecp256k1 that the following bytes denote an /// uncompressed pubkey. 
/// diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index 442948e637cb..2560d2211683 100644 --- a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -42,8 +42,10 @@ pub struct NodeRecord { } impl NodeRecord { + /// Derive the [`NodeRecord`] from the secret key and addr. + /// + /// Note: this will set both the TCP and UDP ports to the port of the addr. #[cfg(feature = "secp256k1")] - /// Derive the [`NodeRecord`] from the secret key and addr pub fn from_secret_key(addr: SocketAddr, sk: &secp256k1::SecretKey) -> Self { let pk = secp256k1::PublicKey::from_secret_key(secp256k1::SECP256K1, sk); let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); @@ -73,12 +75,34 @@ impl NodeRecord { self } + /// Sets the tcp port + pub const fn with_tcp_port(mut self, port: u16) -> Self { + self.tcp_port = port; + self + } + + /// Sets the udp port + pub const fn with_udp_port(mut self, port: u16) -> Self { + self.udp_port = port; + self + } + /// Creates a new record from a socket addr and peer id. - #[allow(dead_code)] pub const fn new(addr: SocketAddr, id: PeerId) -> Self { Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id } } + /// Creates a new record from an ip address and ports. 
+ pub fn new_with_ports( + ip_addr: IpAddr, + tcp_port: u16, + udp_port: Option, + id: PeerId, + ) -> Self { + let udp_port = udp_port.unwrap_or(tcp_port); + Self { address: ip_addr, tcp_port, udp_port, id } + } + /// The TCP socket address of this node #[must_use] pub const fn tcp_addr(&self) -> SocketAddr { diff --git a/crates/net/peers/src/trusted_peer.rs b/crates/net/peers/src/trusted_peer.rs index bd1923dcc612..aa7e0a015336 100644 --- a/crates/net/peers/src/trusted_peer.rs +++ b/crates/net/peers/src/trusted_peer.rs @@ -45,24 +45,42 @@ impl TrustedPeer { Self { host, tcp_port: port, udp_port: port, id } } + const fn to_node_record(&self, ip: IpAddr) -> NodeRecord { + NodeRecord { address: ip, id: self.id, tcp_port: self.tcp_port, udp_port: self.udp_port } + } + + /// Tries to resolve directly to a [`NodeRecord`] if the host is an IP address. + fn try_node_record(&self) -> Result { + match &self.host { + Host::Ipv4(ip) => Ok(self.to_node_record((*ip).into())), + Host::Ipv6(ip) => Ok(self.to_node_record((*ip).into())), + Host::Domain(domain) => Err(domain), + } + } + + /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`]. + /// + /// This use [`ToSocketAddr`](std::net::ToSocketAddrs) to resolve the host to an IP address. + pub fn resolve_blocking(&self) -> Result { + let domain = match self.try_node_record() { + Ok(record) => return Ok(record), + Err(domain) => domain, + }; + // Resolve the domain to an IP address + let mut ips = std::net::ToSocketAddrs::to_socket_addrs(&(domain, 0))?; + let ip = ips + .next() + .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?; + + Ok(self.to_node_record(ip.ip())) + } + /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`]. 
+ #[cfg(any(test, feature = "net"))] pub async fn resolve(&self) -> Result { - let domain = match self.host.to_owned() { - Host::Ipv4(ip) => { - let id = self.id; - let tcp_port = self.tcp_port; - let udp_port = self.udp_port; - - return Ok(NodeRecord { address: ip.into(), id, tcp_port, udp_port }); - } - Host::Ipv6(ip) => { - let id = self.id; - let tcp_port = self.tcp_port; - let udp_port = self.udp_port; - - return Ok(NodeRecord { address: ip.into(), id, tcp_port, udp_port }); - } - Host::Domain(domain) => domain, + let domain = match self.try_node_record() { + Ok(record) => return Ok(record), + Err(domain) => domain, }; // Resolve the domain to an IP address @@ -70,12 +88,8 @@ impl TrustedPeer { let ip = ips .next() .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?; - Ok(NodeRecord { - address: ip.ip(), - id: self.id, - tcp_port: self.tcp_port, - udp_port: self.udp_port, - }) + + Ok(self.to_node_record(ip.ip())) } } @@ -285,15 +299,16 @@ mod tests { TrustedPeer::new(url::Host::Domain(domain.to_owned()), 30300, PeerId::random()); // Resolve domain and validate - let rec = rec.resolve().await.unwrap(); - match rec.address { - std::net::IpAddr::V4(addr) => { + let ensure = |rec: NodeRecord| match rec.address { + IpAddr::V4(addr) => { assert_eq!(addr, std::net::Ipv4Addr::new(127, 0, 0, 1)) } - std::net::IpAddr::V6(addr) => { - assert_eq!(addr, std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)) + IpAddr::V6(addr) => { + assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)) } - } + }; + ensure(rec.resolve().await.unwrap()); + ensure(rec.resolve_blocking().unwrap()); } } } diff --git a/crates/node-core/src/metrics/version_metrics.rs b/crates/node-core/src/metrics/version_metrics.rs deleted file mode 100644 index f0b11c3b7e8b..000000000000 --- a/crates/node-core/src/metrics/version_metrics.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! This exposes reth's version information over prometheus. 
- -use crate::version::build_profile_name; -use metrics::gauge; - -const LABELS: [(&str, &str); 6] = [ - ("version", env!("CARGO_PKG_VERSION")), - ("build_timestamp", env!("VERGEN_BUILD_TIMESTAMP")), - ("cargo_features", env!("VERGEN_CARGO_FEATURES")), - ("git_sha", env!("VERGEN_GIT_SHA")), - ("target_triple", env!("VERGEN_CARGO_TARGET_TRIPLE")), - ("build_profile", build_profile_name()), -]; - -/// This exposes reth's version information over prometheus. -pub fn register_version_metrics() { - let _gauge = gauge!("info", &LABELS); -} diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index b8649d858c92..15345c49b6c0 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -26,6 +26,32 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { type Engine: EngineTypes; } +/// A [`NodeTypes`] type builder +#[derive(Default, Debug)] +pub struct AnyNodeTypes

(PhantomData

, PhantomData); + +impl AnyNodeTypes { + /// Sets the `Primitives` associated type. + pub const fn primitives(self) -> AnyNodeTypes { + AnyNodeTypes::(PhantomData::, PhantomData::) + } + + /// Sets the `Engine` associated type. + pub const fn engine(self) -> AnyNodeTypes { + AnyNodeTypes::(PhantomData::

, PhantomData::) + } +} + +impl NodeTypes for AnyNodeTypes +where + P: NodePrimitives + Send + Sync + Unpin + 'static, + E: EngineTypes + Send + Sync + Unpin + 'static, +{ + type Primitives = P; + + type Engine = E; +} + /// A helper trait that is downstream of the [`NodeTypes`] trait and adds stateful components to the /// node. /// @@ -88,7 +114,7 @@ where } /// Encapsulates all types and components of the node. -pub trait FullNodeComponents: FullNodeTypes + 'static { +pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// The transaction pool of the node. type Pool: TransactionPool + Unpin; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index f47ed8e7d4be..5a29b6e778a4 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] ## reth +reth-chainspec.workspace = true reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true @@ -20,7 +21,7 @@ reth-db-common.workspace = true reth-exex.workspace = true reth-evm.workspace = true reth-provider.workspace = true -reth-db = { workspace = true, features = ["mdbx"] } +reth-db = { workspace = true, features = ["mdbx"], optional = true } reth-db-api.workspace = true reth-rpc-engine-api.workspace = true reth-rpc.workspace = true @@ -44,6 +45,8 @@ reth-node-events.workspace = true reth-consensus.workspace = true reth-consensus-debug-client.workspace = true reth-rpc-types.workspace = true +reth-engine-util.workspace = true +reth-cli-util.workspace = true ## async futures.workspace = true @@ -55,10 +58,7 @@ tokio = { workspace = true, features = [ ] } tokio-stream.workspace = true -# ethereum -discv5.workspace = true - -# crypto +## crypto secp256k1 = { workspace = true, features = [ "global-context", "rand-std", @@ -73,5 +73,12 @@ confy.workspace = true rayon.workspace = true backon.workspace = true +# tracing +tracing.workspace = true + 
[dev-dependencies] tempfile.workspace = true + +[features] +default = [] +test-utils = ["reth-db/test-utils"] diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 02d1b0e04a5a..d46b73d76872 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -9,12 +9,9 @@ use crate::{ rpc::{RethRpcServerHandles, RpcContext}, DefaultNodeLauncher, Node, NodeHandle, }; -use discv5::ListenConfig; use futures::Future; -use reth_db::{ - test_utils::{create_test_rw_db_with_path, tempdir_path, TempDatabase}, - DatabaseEnv, -}; +use reth_chainspec::ChainSpec; +use reth_cli_util::get_secret_key; use reth_db_api::{ database::Database, database_metrics::{DatabaseMetadata, DatabaseMetrics}, @@ -25,23 +22,19 @@ use reth_network::{ }; use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; use reth_node_core::{ - args::{get_secret_key, DatadirArgs}, cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, - dirs::{ChainPath, DataDirPath, MaybePlatformPath}, + dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, - primitives::{kzg::KzgSettings, Head}, - utils::write_peers_to_file, + primitives::Head, }; -use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, ChainSpec}; +use reth_primitives::revm_primitives::EnvKzgSettings; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; use secp256k1::SecretKey; pub use states::*; -use std::{ - net::{IpAddr, SocketAddr, SocketAddrV4, SocketAddrV6}, - sync::Arc, -}; +use std::sync::Arc; +use tracing::{info, trace, warn}; mod states; @@ -65,7 +58,7 @@ pub type RethFullAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter NodeBuilder { } /// Creates an _ephemeral_ preconfigured node for testing purposes. 
+ #[cfg(feature = "test-utils")] pub fn testing_node( mut self, task_executor: TaskExecutor, - ) -> WithLaunchContext>>> { - let path = MaybePlatformPath::::from(tempdir_path()); - self.config = self - .config - .with_datadir_args(DatadirArgs { datadir: path.clone(), ..Default::default() }); + ) -> WithLaunchContext>>> + { + let path = reth_node_core::dirs::MaybePlatformPath::::from( + reth_db::test_utils::tempdir_path(), + ); + self.config = self.config.with_datadir_args(reth_node_core::args::DatadirArgs { + datadir: path.clone(), + ..Default::default() + }); let data_dir = path.unwrap_or_chain_default(self.config.chain.chain, self.config.datadir.clone()); - let db = create_test_rw_db_with_path(data_dir.db()); + let db = reth_db::test_utils::create_test_rw_db_with_path(data_dir.db()); WithLaunchContext { builder: self.with_database(db), task_executor } } @@ -458,14 +457,19 @@ impl BuilderContext { self.provider().chain_spec() } + /// Returns true if the node is configured as --dev + pub const fn is_dev(&self) -> bool { + self.config().dev.dev + } + /// Returns the transaction pool config of the node. pub fn pool_config(&self) -> PoolConfig { self.config().txpool.pool_config() } - /// Loads `MAINNET_KZG_TRUSTED_SETUP`. - pub fn kzg_settings(&self) -> eyre::Result> { - Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + /// Loads `EnvKzgSettings::Default`. + pub const fn kzg_settings(&self) -> eyre::Result { + Ok(EnvKzgSettings::Default) } /// Returns the config for payload building. 
@@ -506,7 +510,18 @@ impl BuilderContext { "p2p network task", |shutdown| { network.run_until_graceful_shutdown(shutdown, |network| { - write_peers_to_file(&network, known_peers_file) + if let Some(peers_file) = known_peers_file { + let num_known_peers = network.num_known_peers(); + trace!(target: "reth::cli", peers_file=?peers_file, num_peers=%num_known_peers, "Saving current peers"); + match network.write_peers_to_file(peers_file.as_path()) { + Ok(_) => { + info!(target: "reth::cli", peers_file=?peers_file, "Wrote network peers to file"); + } + Err(err) => { + warn!(target: "reth::cli", %err, "Failed to write network peers to file"); + } + } + } }) }, ); @@ -524,12 +539,19 @@ impl BuilderContext { pub fn network_config_builder(&self) -> eyre::Result { let secret_key = self.network_secret(&self.config().datadir())?; let default_peers_path = self.config().datadir().known_peers(); - Ok(self.config().network.network_config( - self.reth_config(), - self.config().chain.clone(), - secret_key, - default_peers_path, - )) + let builder = self + .config() + .network + .network_config( + self.reth_config(), + self.config().chain.clone(), + secret_key, + default_peers_path, + ) + .with_task_executor(Box::new(self.executor.clone())) + .set_head(self.head); + + Ok(builder) } /// Get the network secret from the given data dir @@ -545,49 +567,7 @@ impl BuilderContext { &self, network_builder: NetworkConfigBuilder, ) -> NetworkConfig { - network_builder - .with_task_executor(Box::new(self.executor.clone())) - .set_head(self.head) - .listener_addr(SocketAddr::new( - self.config().network.addr, - // set discovery port based on instance number - self.config().network.port + self.config().instance - 1, - )) - .discovery_addr(SocketAddr::new( - self.config().network.discovery.addr, - // set discovery port based on instance number - self.config().network.discovery.port + self.config().instance - 1, - )) - .map_discv5_config_builder(|builder| { - // Use rlpx address if none given - let 
discv5_addr_ipv4 = self.config().network.discovery.discv5_addr.or( - match self.config().network.addr { - IpAddr::V4(ip) => Some(ip), - IpAddr::V6(_) => None, - }, - ); - let discv5_addr_ipv6 = self.config().network.discovery.discv5_addr_ipv6.or( - match self.config().network.addr { - IpAddr::V4(_) => None, - IpAddr::V6(ip) => Some(ip), - }, - ); - - let discv5_port_ipv4 = - self.config().network.discovery.discv5_port + self.config().instance - 1; - let discv5_port_ipv6 = - self.config().network.discovery.discv5_port_ipv6 + self.config().instance - 1; - - builder.discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( - discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), - discv5_addr_ipv6 - .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)), - )) - .build(), - ) - }) - .build(self.provider.clone()) + network_builder.build(self.provider.clone()) } } diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index a20f7eaa71ed..8f09d5edd94b 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -31,7 +31,7 @@ pub struct NodeBuilderWithTypes { impl NodeBuilderWithTypes { /// Creates a new instance of the node builder with the given configuration and types. 
- pub fn new(config: NodeConfig, database: T::DB) -> Self { + pub const fn new(config: NodeConfig, database: T::DB) -> Self { Self { config, adapter: NodeTypesAdapter::new(database) } } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 33ecd00bb46f..72d2e6933da5 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -2,11 +2,12 @@ use crate::{ components::{ - Components, ExecutorBuilder, NetworkBuilder, NodeComponents, PayloadServiceBuilder, - PoolBuilder, + Components, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, NodeComponents, + PayloadServiceBuilder, PoolBuilder, }, BuilderContext, ConfigureEvm, FullNodeTypes, }; +use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_transaction_pool::TransactionPool; use std::{future::Future, marker::PhantomData}; @@ -31,19 +32,22 @@ use std::{future::Future, marker::PhantomData}; /// All component builders are captured in the builder state and will be consumed once the node is /// launched. #[derive(Debug)] -pub struct ComponentsBuilder { +pub struct ComponentsBuilder { pool_builder: PoolB, payload_builder: PayloadB, network_builder: NetworkB, executor_builder: ExecB, + consensus_builder: ConsB, _marker: PhantomData, } -impl - ComponentsBuilder +impl + ComponentsBuilder { /// Configures the node types. 
- pub fn node_types(self) -> ComponentsBuilder + pub fn node_types( + self, + ) -> ComponentsBuilder where Types: FullNodeTypes, { @@ -52,6 +56,7 @@ impl payload_builder, network_builder, executor_builder: evm_builder, + consensus_builder, _marker, } = self; ComponentsBuilder { @@ -59,6 +64,7 @@ impl pool_builder, payload_builder, network_builder, + consensus_builder, _marker: Default::default(), } } @@ -70,6 +76,7 @@ impl payload_builder: self.payload_builder, network_builder: self.network_builder, executor_builder: self.executor_builder, + consensus_builder: self.consensus_builder, _marker: self._marker, } } @@ -81,6 +88,7 @@ impl payload_builder: f(self.payload_builder), network_builder: self.network_builder, executor_builder: self.executor_builder, + consensus_builder: self.consensus_builder, _marker: self._marker, } } @@ -92,6 +100,7 @@ impl payload_builder: self.payload_builder, network_builder: f(self.network_builder), executor_builder: self.executor_builder, + consensus_builder: self.consensus_builder, _marker: self._marker, } } @@ -103,13 +112,26 @@ impl payload_builder: self.payload_builder, network_builder: self.network_builder, executor_builder: f(self.executor_builder), + consensus_builder: self.consensus_builder, + _marker: self._marker, + } + } + + /// Apply a function to the consensus builder. 
+ pub fn map_consensus(self, f: impl FnOnce(ConsB) -> ConsB) -> Self { + Self { + pool_builder: self.pool_builder, + payload_builder: self.payload_builder, + network_builder: self.network_builder, + executor_builder: self.executor_builder, + consensus_builder: f(self.consensus_builder), _marker: self._marker, } } } -impl - ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, { @@ -120,7 +142,7 @@ where pub fn pool( self, pool_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: PoolBuilder, { @@ -129,6 +151,7 @@ where payload_builder, network_builder, executor_builder: evm_builder, + consensus_builder, _marker, } = self; ComponentsBuilder { @@ -136,13 +159,14 @@ where payload_builder, network_builder, executor_builder: evm_builder, + consensus_builder, _marker, } } } -impl - ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -154,7 +178,7 @@ where pub fn network( self, network_builder: NB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where NB: NetworkBuilder, { @@ -163,6 +187,7 @@ where payload_builder, network_builder: _, executor_builder: evm_builder, + consensus_builder, _marker, } = self; ComponentsBuilder { @@ -170,6 +195,7 @@ where payload_builder, network_builder, executor_builder: evm_builder, + consensus_builder, _marker, } } @@ -181,7 +207,7 @@ where pub fn payload( self, payload_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: PayloadServiceBuilder, { @@ -190,6 +216,7 @@ where payload_builder: _, network_builder, executor_builder: evm_builder, + consensus_builder, _marker, } = self; ComponentsBuilder { @@ -197,6 +224,7 @@ where payload_builder, network_builder, executor_builder: evm_builder, + consensus_builder, _marker, } } @@ -208,32 +236,69 @@ where pub fn executor( self, executor_builder: EB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where EB: ExecutorBuilder, { - let Self { pool_builder, payload_builder, network_builder, 
executor_builder: _, _marker } = - self; + let Self { + pool_builder, + payload_builder, + network_builder, + executor_builder: _, + consensus_builder, + _marker, + } = self; ComponentsBuilder { pool_builder, payload_builder, network_builder, executor_builder, + consensus_builder, + _marker, + } + } + + /// Configures the consensus builder. + /// + /// This accepts a [`ConsensusBuilder`] instance that will be used to create the node's + /// components for consensus. + pub fn consensus( + self, + consensus_builder: CB, + ) -> ComponentsBuilder + where + CB: ConsensusBuilder, + { + let Self { + pool_builder, + payload_builder, + network_builder, + executor_builder, + consensus_builder: _, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder, + consensus_builder, _marker, } } } -impl NodeComponentsBuilder - for ComponentsBuilder +impl NodeComponentsBuilder + for ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, NetworkB: NetworkBuilder, PayloadB: PayloadServiceBuilder, ExecB: ExecutorBuilder, + ConsB: ConsensusBuilder, { - type Components = Components; + type Components = Components; async fn build_components( self, @@ -244,6 +309,7 @@ where payload_builder, network_builder, executor_builder: evm_builder, + consensus_builder, _marker, } = self; @@ -251,18 +317,27 @@ where let pool = pool_builder.build_pool(context).await?; let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; + let consensus = consensus_builder.build_consensus(context).await?; - Ok(Components { transaction_pool: pool, evm_config, network, payload_builder, executor }) + Ok(Components { + transaction_pool: pool, + evm_config, + network, + payload_builder, + executor, + consensus, + }) } } -impl Default for ComponentsBuilder<(), (), (), (), ()> { +impl Default for ComponentsBuilder<(), (), (), (), (), ()> { fn 
default() -> Self { Self { pool_builder: (), payload_builder: (), network_builder: (), executor_builder: (), + consensus_builder: (), _marker: Default::default(), } } @@ -288,16 +363,17 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm, Executor: BlockExecutorProvider, + Cons: Consensus + Clone + Unpin + 'static, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/consensus.rs b/crates/node/builder/src/components/consensus.rs new file mode 100644 index 000000000000..6c90bda54752 --- /dev/null +++ b/crates/node/builder/src/components/consensus.rs @@ -0,0 +1,32 @@ +//! Consensus component for the node builder. +use crate::{BuilderContext, FullNodeTypes}; +use std::future::Future; + +/// A type that knows how to build the consensus implementation. +pub trait ConsensusBuilder: Send { + /// The consensus implementation to build. + type Consensus: reth_consensus::Consensus + Clone + Unpin + 'static; + + /// Creates the consensus implementation. 
+ fn build_consensus( + self, + ctx: &BuilderContext, + ) -> impl Future> + Send; +} + +impl ConsensusBuilder for F +where + Node: FullNodeTypes, + Consensus: reth_consensus::Consensus + Clone + Unpin + 'static, + F: FnOnce(&BuilderContext) -> Fut + Send, + Fut: Future> + Send, +{ + type Consensus = Consensus; + + fn build_consensus( + self, + ctx: &BuilderContext, + ) -> impl Future> { + self(ctx) + } +} diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 8d0494470cb1..0419f7a71c74 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -9,16 +9,19 @@ use crate::{ConfigureEvm, FullNodeTypes}; pub use builder::*; +pub use consensus::*; pub use execute::*; pub use network::*; pub use payload::*; pub use pool::*; +use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; mod builder; +mod consensus; mod execute; mod network; mod payload; @@ -39,6 +42,9 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync /// The type that knows how to execute blocks. type Executor: BlockExecutorProvider; + /// The consensus type of the node. + type Consensus: Consensus + Clone + Unpin + 'static; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -48,6 +54,9 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync /// Returns the node's executor type. fn block_executor(&self) -> &Self::Executor; + /// Returns the node's consensus type. + fn consensus(&self) -> &Self::Consensus; + /// Returns the handle to the network fn network(&self) -> &NetworkHandle; @@ -59,29 +68,34 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. 
pub transaction_pool: Pool, /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. pub evm_config: EVM, /// The node's executor type used to execute individual blocks and batches of blocks. pub executor: Executor, + /// The consensus implementation of the node. + pub consensus: Consensus, /// The network implementation of the node. pub network: NetworkHandle, /// The handle to the payload builder service. pub payload_builder: PayloadBuilderHandle, } -impl NodeComponents for Components +impl NodeComponents + for Components where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm, Executor: BlockExecutorProvider, + Cons: Consensus + Clone + Unpin + 'static, { type Pool = Pool; type Evm = EVM; type Executor = Executor; + type Consensus = Cons; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -95,6 +109,10 @@ where &self.executor } + fn consensus(&self) -> &Self::Consensus { + &self.consensus + } + fn network(&self) -> &NetworkHandle { &self.network } @@ -104,18 +122,20 @@ where } } -impl Clone for Components +impl Clone for Components where Node: FullNodeTypes, Pool: TransactionPool, EVM: ConfigureEvm, Executor: BlockExecutorProvider, + Cons: Consensus + Clone, { fn clone(&self) -> Self { Self { transaction_pool: self.transaction_pool.clone(), evm_config: self.evm_config.clone(), executor: self.executor.clone(), + consensus: self.consensus.clone(), network: self.network.clone(), payload_builder: self.payload_builder.clone(), } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 395e5e19eb12..f57b3f010e61 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -1,33 +1,49 @@ //! Helper types that can be used by launchers. 
+use crate::{ + components::{NodeComponents, NodeComponentsBuilder}, + hooks::OnComponentInitializedHook, + BuilderContext, NodeAdapter, +}; use backon::{ConstantBuilder, Retryable}; use eyre::Context; use rayon::ThreadPoolBuilder; use reth_auto_seal_consensus::MiningMode; use reth_beacon_consensus::EthBeaconConsensus; +use reth_blockchain_tree::{ + noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, + TreeExternals, +}; +use reth_chainspec::{Chain, ChainSpec}; use reth_config::{config::EtlConfig, PruneConfig}; +use reth_consensus::Consensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_network_p2p::headers::client::HeadersClient; +use reth_node_api::FullNodeTypes; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, }; -use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, B256}; +use reth_primitives::{BlockNumber, Head, B256}; use reth_provider::{ - providers::StaticFileProvider, HeaderSyncMode, ProviderFactory, StaticFileProviderFactory, + providers::{BlockchainProvider, StaticFileProvider}, + CanonStateNotificationSender, ProviderFactory, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; -use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; +use reth_stages::{sets::DefaultStages, MetricEvent, Pipeline, PipelineTarget}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; -use std::{sync::Arc, thread::available_parallelism}; -use tokio::sync::{mpsc::Receiver, oneshot}; +use std::{marker::PhantomData, sync::Arc, thread::available_parallelism}; +use tokio::sync::{ + 
mpsc::{unbounded_channel, Receiver, UnboundedSender}, + oneshot, watch, +}; /// Reusable setup for launching a node. /// @@ -55,11 +71,11 @@ impl LaunchContext { /// `config`. /// /// Attaches both the `NodeConfig` and the loaded `reth.toml` config to the launch context. - pub async fn with_loaded_toml_config( + pub fn with_loaded_toml_config( self, config: NodeConfig, ) -> eyre::Result> { - let toml_config = self.load_toml_config(&config).await?; + let toml_config = self.load_toml_config(&config)?; Ok(self.with(WithConfigs { config, toml_config })) } @@ -67,7 +83,7 @@ impl LaunchContext { /// `config`. /// /// This is async because the trusted peers may have to be resolved. - pub async fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { + pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config()); let mut toml_config = confy::load_path::(&config_path) @@ -91,7 +107,7 @@ impl LaunchContext { ) -> eyre::Result<()> { if reth_config.prune.is_none() { if let Some(prune_config) = config.prune_config() { - reth_config.update_prune_confing(prune_config); + reth_config.update_prune_config(prune_config); info!(target: "reth::cli", "Saving prune config to toml file"); reth_config.save(config_path.as_ref())?; } @@ -193,15 +209,16 @@ impl LaunchContextWith { info!(target: "reth::cli", "Adding trusted nodes"); // resolve trusted peers if they use a domain instead of dns - for peer in &self.attachment.config.network.trusted_peers { + let resolved = futures::future::try_join_all( + self.attachment.config.network.trusted_peers.iter().map(|peer| async move { let backoff = ConstantBuilder::default() .with_max_times(self.attachment.config.network.dns_retries); - let resolved = (move || { peer.resolve() }) - .retry(&backoff) - .notify(|err, _| warn!(target: "reth::cli", "Error resolving peer domain: {err}. 
Retrying...")) - .await?; - self.attachment.toml_config.peers.trusted_nodes.insert(resolved); - } + (move || { peer.resolve() }) + .retry(&backoff) + .notify(|err, _| warn!(target: "reth::cli", "Error resolving peer domain: {err}. Retrying...")) + .await + })).await?; + self.attachment.toml_config.peers.trusted_nodes.extend(resolved); } Ok(self) } @@ -235,7 +252,7 @@ impl LaunchContextWith> { /// - Making sure the ETL dir is set to the datadir /// - RPC settings are adjusted to the correct port pub fn with_adjusted_configs(self) -> Self { - self.ensure_etl_datadir().with_adjusted_rpc_instance_ports() + self.ensure_etl_datadir().with_adjusted_instance_ports() } /// Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to @@ -249,7 +266,7 @@ impl LaunchContextWith> { } /// Change rpc port numbers based on the instance number. - pub fn with_adjusted_rpc_instance_ports(mut self) -> Self { + pub fn with_adjusted_instance_ports(mut self) -> Self { self.node_config_mut().adjust_instance_ports(); self } @@ -304,9 +321,9 @@ impl LaunchContextWith> { self.toml_config().prune.clone().or_else(|| self.node_config().prune_config()) } - /// Returns the configured [`PruneModes`] - pub fn prune_modes(&self) -> Option { - self.prune_config().map(|config| config.segments) + /// Returns the configured [`PruneModes`], returning the default if no config was available. + pub fn prune_modes(&self) -> PruneModes { + self.prune_config().map(|config| config.segments).unwrap_or_default() } /// Returns an initialized [`PrunerBuilder`] based on the configured [`PruneConfig`] @@ -316,16 +333,6 @@ impl LaunchContextWith> { .timeout(PrunerBuilder::DEFAULT_TIMEOUT) } - /// Returns the initial pipeline target, based on whether or not the node is running in - /// `debug.tip` mode, `debug.continuous` mode, or neither. - /// - /// If running in `debug.tip` mode, the configured tip is returned. - /// Otherwise, if running in `debug.continuous` mode, the genesis hash is returned. 
- /// Otherwise, `None` is returned. This is what the node will do by default. - pub fn initial_pipeline_target(&self) -> Option { - self.node_config().initial_pipeline_target(self.genesis_hash()) - } - /// Loads the JWT secret for the engine API pub fn auth_jwt_secret(&self) -> eyre::Result { let default_jwt_path = self.data_dir().jwt(); @@ -358,6 +365,7 @@ where self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, ) + .with_prune_modes(self.prune_modes()) .with_static_files_metrics(); let has_receipt_pruning = @@ -377,24 +385,23 @@ where info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); + let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); + // Builds an unwind-only pipeline let pipeline = Pipeline::builder() .add_stages(DefaultStages::new( factory.clone(), - HeaderSyncMode::Continuous, + tip_rx, Arc::new(EthBeaconConsensus::new(self.chain_spec())), NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), NoopBlockExecutorProvider::default(), self.toml_config().stages.clone(), - self.prune_modes().unwrap_or_default(), + self.prune_modes(), )) .build( factory.clone(), - StaticFileProducer::new( - factory.clone(), - self.prune_modes().unwrap_or_default(), - ), + StaticFileProducer::new(factory.clone(), self.prune_modes()), ); // Unwinds to block @@ -447,12 +454,23 @@ where self.right().static_file_provider() } - /// Creates a new [`StaticFileProducer`] with the attached database. - pub fn static_file_producer(&self) -> StaticFileProducer { - StaticFileProducer::new( - self.provider_factory().clone(), - self.prune_modes().unwrap_or_default(), - ) + /// Convenience function to [`Self::start_prometheus_endpoint`] + pub async fn with_prometheus(self) -> eyre::Result { + self.start_prometheus_endpoint().await?; + Ok(self) + } + + /// Starts the prometheus endpoint. 
+ pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { + let prometheus_handle = self.node_config().install_prometheus_recorder()?; + self.node_config() + .start_metrics_endpoint( + prometheus_handle, + self.database().clone(), + self.static_file_provider(), + self.task_executor().clone(), + ) + .await } /// Convenience function to [`Self::init_genesis`] @@ -466,32 +484,93 @@ where init_genesis(self.provider_factory().clone()) } - /// Returns the max block that the node should run to, looking it up from the network if - /// necessary - pub async fn max_block(&self, client: C) -> eyre::Result> + /// Creates a new `WithMeteredProvider` container and attaches it to the + /// launch context. + pub fn with_metrics(self) -> LaunchContextWith>> { + let (metrics_sender, metrics_receiver) = unbounded_channel(); + + let with_metrics = + WithMeteredProvider { provider_factory: self.right().clone(), metrics_sender }; + + debug!(target: "reth::cli", "Spawning stages metrics listener task"); + let sync_metrics_listener = reth_stages::MetricsListener::new(metrics_receiver); + self.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener); + + LaunchContextWith { + inner: self.inner, + attachment: self.attachment.map_right(|_| with_metrics), + } + } +} + +impl LaunchContextWith>> +where + DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, +{ + /// Returns the configured `ProviderFactory`. + const fn provider_factory(&self) -> &ProviderFactory { + &self.right().provider_factory + } + + /// Returns the metrics sender. + fn sync_metrics_tx(&self) -> UnboundedSender { + self.right().metrics_sender.clone() + } + + /// Creates a `BlockchainProvider` and attaches it to the launch context. 
+ pub fn with_blockchain_db( + self, + ) -> eyre::Result>>> where - C: HeadersClient, + T: FullNodeTypes::DB>>, { - self.node_config().max_block(client, self.provider_factory().clone()).await + let tree_config = BlockchainTreeConfig::default(); + + // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: + let (canon_state_notification_sender, _receiver) = + tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); + + let blockchain_db = BlockchainProvider::new( + self.provider_factory().clone(), + Arc::new(NoopBlockchainTree::with_canon_state_notifications( + canon_state_notification_sender.clone(), + )), + )?; + + let metered_providers = WithMeteredProviders { + db_provider_container: WithMeteredProvider { + provider_factory: self.provider_factory().clone(), + metrics_sender: self.sync_metrics_tx(), + }, + blockchain_db, + tree_config, + canon_state_notification_sender, + // we store here a reference to T. + phantom_data: PhantomData, + }; + + let ctx = LaunchContextWith { + inner: self.inner, + attachment: self.attachment.map_right(|_| metered_providers), + }; + + Ok(ctx) } +} - /// Convenience function to [`Self::start_prometheus_endpoint`] - pub async fn with_prometheus(self) -> eyre::Result { - self.start_prometheus_endpoint().await?; - Ok(self) +impl LaunchContextWith>> +where + DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, + T: FullNodeTypes>, +{ + /// Returns access to the underlying database. + pub fn database(&self) -> &DB { + self.provider_factory().db_ref() } - /// Starts the prometheus endpoint. 
- pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { - let prometheus_handle = self.node_config().install_prometheus_recorder()?; - self.node_config() - .start_metrics_endpoint( - prometheus_handle, - self.database().clone(), - self.static_file_provider(), - self.task_executor().clone(), - ) - .await + /// Returns the configured `ProviderFactory`. + pub const fn provider_factory(&self) -> &ProviderFactory { + &self.right().db_provider_container.provider_factory } /// Fetches the head block from the database. @@ -502,6 +581,166 @@ where .lookup_head(self.provider_factory().clone()) .wrap_err("the head block is missing") } + + /// Returns the metrics sender. + pub fn sync_metrics_tx(&self) -> UnboundedSender { + self.right().db_provider_container.metrics_sender.clone() + } + + /// Returns a reference to the `BlockchainProvider`. + pub const fn blockchain_db(&self) -> &BlockchainProvider { + &self.right().blockchain_db + } + + /// Returns a reference to the `BlockchainTreeConfig`. + pub const fn tree_config(&self) -> &BlockchainTreeConfig { + &self.right().tree_config + } + + /// Returns the `CanonStateNotificationSender`. + pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { + self.right().canon_state_notification_sender.clone() + } + + /// Creates a `NodeAdapter` and attaches it to the launch context. 
+ pub async fn with_components( + self, + components_builder: CB, + on_component_initialized: Box< + dyn OnComponentInitializedHook>, + >, + ) -> eyre::Result>>> + where + CB: NodeComponentsBuilder, + { + // fetch the head block from the database + let head = self.lookup_head()?; + + let builder_ctx = BuilderContext::new( + head, + self.blockchain_db().clone(), + self.task_executor().clone(), + self.configs().clone(), + ); + + debug!(target: "reth::cli", "creating components"); + let components = components_builder.build_components(&builder_ctx).await?; + + let consensus: Arc = Arc::new(components.consensus().clone()); + + let tree_externals = TreeExternals::new( + self.provider_factory().clone(), + consensus.clone(), + components.block_executor().clone(), + ); + let tree = BlockchainTree::new(tree_externals, *self.tree_config(), self.prune_modes())? + .with_sync_metrics_tx(self.sync_metrics_tx()) + // Note: This is required because we need to ensure that both the components and the + // tree are using the same channel for canon state notifications. 
This will be removed + // once the Blockchain provider no longer depends on an instance of the tree + .with_canon_state_notification_sender(self.canon_state_notification_sender()); + + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + + // Replace the tree component with the actual tree + let blockchain_db = self.blockchain_db().clone().with_tree(blockchain_tree); + + debug!(target: "reth::cli", "configured blockchain tree"); + + let node_adapter = NodeAdapter { + components, + task_executor: self.task_executor().clone(), + provider: blockchain_db.clone(), + }; + + debug!(target: "reth::cli", "calling on_component_initialized hook"); + on_component_initialized.on_event(node_adapter.clone())?; + + let components_container = WithComponents { + db_provider_container: WithMeteredProvider { + provider_factory: self.provider_factory().clone(), + metrics_sender: self.sync_metrics_tx(), + }, + blockchain_db, + tree_config: self.right().tree_config, + node_adapter, + head, + consensus, + }; + + let ctx = LaunchContextWith { + inner: self.inner, + attachment: self.attachment.map_right(|_| components_container), + }; + + Ok(ctx) + } +} + +impl LaunchContextWith>> +where + DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, + T: FullNodeTypes>, + CB: NodeComponentsBuilder, +{ + /// Returns the configured `ProviderFactory`. + pub const fn provider_factory(&self) -> &ProviderFactory { + &self.right().db_provider_container.provider_factory + } + + /// Returns the max block that the node should run to, looking it up from the network if + /// necessary + pub async fn max_block(&self, client: C) -> eyre::Result> + where + C: HeadersClient, + { + self.node_config().max_block(client, self.provider_factory().clone()).await + } + + /// Returns the static file provider to interact with the static files. 
+ pub fn static_file_provider(&self) -> StaticFileProvider { + self.provider_factory().static_file_provider() + } + + /// Creates a new [`StaticFileProducer`] with the attached database. + pub fn static_file_producer(&self) -> StaticFileProducer { + StaticFileProducer::new(self.provider_factory().clone(), self.prune_modes()) + } + + /// Returns the current head block. + pub const fn head(&self) -> Head { + self.right().head + } + + /// Returns the configured `NodeAdapter`. + pub const fn node_adapter(&self) -> &NodeAdapter { + &self.right().node_adapter + } + + /// Returns a reference to the `BlockchainProvider`. + pub const fn blockchain_db(&self) -> &BlockchainProvider { + &self.right().blockchain_db + } + + /// Returns the configured `Consensus`. + pub fn consensus(&self) -> Arc { + self.right().consensus.clone() + } + + /// Returns the metrics sender. + pub fn sync_metrics_tx(&self) -> UnboundedSender { + self.right().db_provider_container.metrics_sender.clone() + } + + /// Returns a reference to the `BlockchainTreeConfig`. + pub const fn tree_config(&self) -> &BlockchainTreeConfig { + &self.right().tree_config + } + + /// Returns the node adapter components. + pub const fn components(&self) -> &CB::Components { + &self.node_adapter().components + } } /// Joins two attachments together. @@ -564,6 +803,42 @@ pub struct WithConfigs { pub toml_config: reth_config::Config, } +/// Helper container type to bundle the [`ProviderFactory`] and the metrics +/// sender. +#[derive(Debug, Clone)] +pub struct WithMeteredProvider { + provider_factory: ProviderFactory, + metrics_sender: UnboundedSender, +} + +/// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`] +/// and a metrics sender. 
+#[allow(missing_debug_implementations)] +pub struct WithMeteredProviders { + db_provider_container: WithMeteredProvider, + blockchain_db: BlockchainProvider, + canon_state_notification_sender: CanonStateNotificationSender, + tree_config: BlockchainTreeConfig, + // this field is used to store a reference to the FullNodeTypes so that we + // can build the components in `with_components` method. + phantom_data: PhantomData, +} + +/// Helper container to bundle the metered providers container and [`NodeAdapter`]. +#[allow(missing_debug_implementations)] +pub struct WithComponents +where + T: FullNodeTypes>, + CB: NodeComponentsBuilder, +{ + db_provider_container: WithMeteredProvider, + tree_config: BlockchainTreeConfig, + blockchain_db: BlockchainProvider, + node_adapter: NodeAdapter, + head: Head, + consensus: Arc, +} + #[cfg(test)] mod tests { use super::{LaunchContext, NodeConfig}; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index cd9971ad7806..328a77e0db87 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -5,26 +5,20 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, node::FullNode, - BuilderContext, NodeBuilderWithComponents, NodeHandle, + NodeBuilderWithComponents, NodeHandle, }; use futures::{future::Either, stream, stream_select, StreamExt}; -use reth_auto_seal_consensus::AutoSealConsensus; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensusEngine, EthBeaconConsensus, + BeaconConsensusEngine, }; -use reth_blockchain_tree::{ - noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, - TreeExternals, -}; -use reth_consensus::Consensus; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; +use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::NetworkEvents; -use 
reth_node_api::{FullNodeComponents, FullNodeTypes}; +use reth_node_api::FullNodeTypes; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, - engine::EngineMessageStreamExt, exit::NodeExitFuture, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; @@ -104,12 +98,13 @@ where add_ons: NodeAddOns { hooks, rpc, exexs: installed_exex }, config, } = target; + let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; // setup the launch context let ctx = ctx .with_configured_globals() // load the toml config - .with_loaded_toml_config(config).await? + .with_loaded_toml_config(config)? // add resolved peers .with_resolved_peers().await? // attach the database @@ -128,85 +123,25 @@ where .with_genesis()? .inspect(|this| { info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); - }); - - // setup the consensus instance - let consensus: Arc = if ctx.is_dev() { - Arc::new(AutoSealConsensus::new(ctx.chain_spec())) - } else { - Arc::new(EthBeaconConsensus::new(ctx.chain_spec())) - }; - - debug!(target: "reth::cli", "Spawning stages metrics listener task"); - let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); - let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); - ctx.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener); - - // fetch the head block from the database - let head = ctx.lookup_head()?; - - // Configure the blockchain tree for the node - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. 
This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - - let blockchain_db = BlockchainProvider::new( - ctx.provider_factory().clone(), - Arc::new(NoopBlockchainTree::with_canon_state_notifications( - canon_state_notification_sender.clone(), - )), - )?; - - let builder_ctx = BuilderContext::new( - head, - blockchain_db.clone(), - ctx.task_executor().clone(), - ctx.configs().clone(), - ); - - debug!(target: "reth::cli", "creating components"); - let components = components_builder.build_components(&builder_ctx).await?; - - let tree_externals = TreeExternals::new( - ctx.provider_factory().clone(), - consensus.clone(), - components.block_executor().clone(), - ); - let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? - .with_sync_metrics_tx(sync_metrics_tx.clone()) - // Note: This is required because we need to ensure that both the components and the - // tree are using the same channel for canon state notifications. This will be removed - // once the Blockchain provider no longer depends on an instance of the tree - .with_canon_state_notification_sender(canon_state_notification_sender); - - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - // Replace the tree component with the actual tree - let blockchain_db = blockchain_db.with_tree(blockchain_tree); - - debug!(target: "reth::cli", "configured blockchain tree"); - - let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; - - let node_adapter = NodeAdapter { - components, - task_executor: ctx.task_executor().clone(), - provider: blockchain_db.clone(), - }; - - debug!(target: "reth::cli", "calling on_component_initialized hook"); - on_component_initialized.on_event(node_adapter.clone())?; + }) + .with_metrics() + // passing FullNodeTypes as type parameter here so that we can build + // later the components. + .with_blockchain_db::()? + .with_components(components_builder, on_component_initialized).await?; // spawn exexs - let exex_manager_handle = - ExExLauncher::new(head, node_adapter.clone(), installed_exex, ctx.configs().clone()) - .launch() - .await; + let exex_manager_handle = ExExLauncher::new( + ctx.head(), + ctx.node_adapter().clone(), + installed_exex, + ctx.configs().clone(), + ) + .launch() + .await; // create pipeline - let network_client = node_adapter.network().fetch_client().await?; + let network_client = ctx.components().network().fetch_client().await?; let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel(); let node_config = ctx.node_config(); @@ -241,34 +176,32 @@ where // install auto-seal let mining_mode = - ctx.dev_mining_mode(node_adapter.components.pool().pending_transactions_listener()); + ctx.dev_mining_mode(ctx.components().pool().pending_transactions_listener()); info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode"); let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( ctx.chain_spec(), - blockchain_db.clone(), - node_adapter.components.pool().clone(), + ctx.blockchain_db().clone(), + ctx.components().pool().clone(), consensus_engine_tx.clone(), mining_mode, - node_adapter.components.block_executor().clone(), + ctx.components().block_executor().clone(), ) .build(); let pipeline = crate::setup::build_networked_pipeline( - ctx.node_config(), &ctx.toml_config().stages, client.clone(), - Arc::clone(&consensus), + ctx.consensus(), ctx.provider_factory().clone(), ctx.task_executor(), - 
sync_metrics_tx, + ctx.sync_metrics_tx(), ctx.prune_config(), max_block, static_file_producer, - node_adapter.components.block_executor().clone(), + ctx.components().block_executor().clone(), pipeline_exex_handle, - ) - .await?; + )?; let pipeline_events = pipeline.events(); task.set_pipeline_events(pipeline_events); @@ -278,30 +211,28 @@ where (pipeline, Either::Left(client)) } else { let pipeline = crate::setup::build_networked_pipeline( - ctx.node_config(), &ctx.toml_config().stages, network_client.clone(), - Arc::clone(&consensus), + ctx.consensus(), ctx.provider_factory().clone(), ctx.task_executor(), - sync_metrics_tx, + ctx.sync_metrics_tx(), ctx.prune_config(), max_block, static_file_producer, - node_adapter.components.block_executor().clone(), + ctx.components().block_executor().clone(), pipeline_exex_handle, - ) - .await?; + )?; (pipeline, Either::Right(network_client.clone())) }; let pipeline_events = pipeline.events(); - let initial_target = ctx.initial_pipeline_target(); + let initial_target = ctx.node_config().debug.tip; let mut pruner_builder = - ctx.pruner_builder().max_reorg_depth(tree_config.max_reorg_depth() as usize); + ctx.pruner_builder().max_reorg_depth(ctx.tree_config().max_reorg_depth() as usize); if let Some(exex_manager_handle) = &exex_manager_handle { pruner_builder = pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); @@ -317,12 +248,11 @@ where let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( client, pipeline, - blockchain_db.clone(), + ctx.blockchain_db().clone(), Box::new(ctx.task_executor().clone()), - Box::new(node_adapter.components.network().clone()), + Box::new(ctx.components().network().clone()), max_block, - ctx.node_config().debug.continuous, - node_adapter.components.payload_builder().clone(), + ctx.components().payload_builder().clone(), initial_target, reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, consensus_engine_tx, @@ -332,12 +262,12 @@ where 
info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - node_adapter.components.network().event_listener().map(Into::into), + ctx.components().network().event_listener().map(Into::into), beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( - ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) + ConsensusLayerHealthEvents::new(Box::new(ctx.blockchain_db().clone())) .map(Into::into), ) } else { @@ -349,8 +279,8 @@ where ctx.task_executor().spawn_critical( "events task", node::handle_events( - Some(node_adapter.components.network().clone()), - Some(head.number), + Some(Box::new(ctx.components().network().clone())), + Some(ctx.head().number), events, database.clone(), ), @@ -363,10 +293,10 @@ where commit: VERGEN_GIT_SHA.to_string(), }; let engine_api = EngineApi::new( - blockchain_db.clone(), + ctx.blockchain_db().clone(), ctx.chain_spec(), beacon_engine_handle, - node_adapter.components.payload_builder().clone().into(), + ctx.components().payload_builder().clone().into(), Box::new(ctx.task_executor().clone()), client, ); @@ -376,8 +306,8 @@ where let jwt_secret = ctx.auth_jwt_secret()?; // Start RPC servers - let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( - node_adapter.clone(), + let (rpc_server_handles, rpc_registry) = crate::rpc::launch_rpc_servers( + ctx.node_adapter().clone(), engine_api, ctx.node_config(), jwt_secret, @@ -441,12 +371,12 @@ where } let full_node = FullNode { - evm_config: node_adapter.components.evm_config().clone(), - block_executor: node_adapter.components.block_executor().clone(), - pool: node_adapter.components.pool().clone(), - network: node_adapter.components.network().clone(), - provider: node_adapter.provider.clone(), - payload_builder: node_adapter.components.payload_builder().clone(), + evm_config: ctx.components().evm_config().clone(), + 
block_executor: ctx.components().block_executor().clone(), + pool: ctx.components().pool().clone(), + network: ctx.components().network().clone(), + provider: ctx.node_adapter().provider.clone(), + payload_builder: ctx.components().payload_builder().clone(), task_executor: ctx.task_executor().clone(), rpc_server_handles, rpc_registry, @@ -457,7 +387,10 @@ where on_node_started.on_event(full_node.clone())?; let handle = NodeHandle { - node_exit_future: NodeExitFuture::new(rx, full_node.config.debug.terminate), + node_exit_future: NodeExitFuture::new( + async { Ok(rx.await??) }, + full_node.config.debug.terminate, + ), node: full_node, }; diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 681db4148cd1..fe8d99ed6090 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -1,4 +1,5 @@ use crate::rpc::{RethRpcServerHandles, RpcRegistry}; +use reth_chainspec::ChainSpec; use reth_network::NetworkHandle; use reth_node_api::FullNodeComponents; use reth_node_core::{ @@ -7,11 +8,10 @@ use reth_node_core::{ rpc::api::EngineApiClient, }; use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::ChainSpec; use reth_provider::ChainSpecProvider; use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle}; use reth_tasks::TaskExecutor; -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; // re-export the node api types use crate::components::NodeComponentsBuilder; @@ -28,10 +28,48 @@ pub trait Node: NodeTypes + Clone { fn components_builder(self) -> Self::ComponentsBuilder; } +/// A [`Node`] type builder +#[derive(Clone, Default, Debug)] +pub struct AnyNode(PhantomData, C); + +impl AnyNode { + /// Configures the types of the node. + pub fn types(self) -> AnyNode { + AnyNode::(PhantomData::, self.1) + } + + /// Sets the node components builder. 
+ pub fn components_builder(self, value: T) -> AnyNode { + AnyNode::(PhantomData::, value) + } +} + +impl NodeTypes for AnyNode +where + N: FullNodeTypes, + C: NodeComponentsBuilder + Sync + Unpin + 'static, +{ + type Primitives = N::Primitives; + + type Engine = N::Engine; +} + +impl Node for AnyNode +where + N: FullNodeTypes + Clone, + C: NodeComponentsBuilder + Clone + Sync + Unpin + 'static, +{ + type ComponentsBuilder = C; + + fn components_builder(self) -> Self::ComponentsBuilder { + self.1 + } +} + /// The launched node with all components including RPC handlers. /// /// This can be used to interact with the launched node. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct FullNode { /// The evm configuration. pub evm_config: Node::Evm, @@ -95,21 +133,3 @@ impl FullNode { self.auth_server_handle().ipc_client().await } } - -impl Clone for FullNode { - fn clone(&self) -> Self { - Self { - evm_config: self.evm_config.clone(), - block_executor: self.block_executor.clone(), - pool: self.pool.clone(), - network: self.network.clone(), - provider: self.provider.clone(), - payload_builder: self.payload_builder.clone(), - task_executor: self.task_executor.clone(), - rpc_server_handles: self.rpc_server_handles.clone(), - rpc_registry: self.rpc_registry.clone(), - config: self.config.clone(), - data_dir: self.data_dir.clone(), - } - } -} diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 9a6ada8f916d..03ae899cba8b 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -1,22 +1,24 @@ //! Builder support for rpc components. 
+use std::{ + fmt, + ops::{Deref, DerefMut}, +}; + use futures::TryFutureExt; use reth_network::NetworkHandle; use reth_node_api::FullNodeComponents; use reth_node_core::{node_config::NodeConfig, rpc::api::EngineApiServer}; use reth_payload_builder::PayloadBuilderHandle; +use reth_rpc::eth::EthApi; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, config::RethRpcServerConfig, - RethModuleRegistry, RpcModuleBuilder, RpcServerHandle, TransportRpcModules, + EthApiBuild, RpcModuleBuilder, RpcRegistryInner, RpcServerHandle, TransportRpcModules, }; use reth_rpc_layer::JwtSecret; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; -use std::{ - fmt, - ops::{Deref, DerefMut}, -}; /// Contains the handles to the spawned RPC servers. /// @@ -145,27 +147,28 @@ impl ExtendRpcModules for () { } } -/// Helper wrapper type to encapsulate the [`RethModuleRegistry`] over components trait. +/// Helper wrapper type to encapsulate the [`RpcRegistryInner`] over components trait. #[derive(Debug)] +#[allow(clippy::type_complexity)] pub struct RpcRegistry { - pub(crate) registry: RethModuleRegistry< + pub(crate) registry: RpcRegistryInner< Node::Provider, Node::Pool, NetworkHandle, TaskExecutor, Node::Provider, - Node::Evm, + EthApi, >, } impl Deref for RpcRegistry { - type Target = RethModuleRegistry< + type Target = RpcRegistryInner< Node::Provider, Node::Pool, NetworkHandle, TaskExecutor, Node::Provider, - Node::Evm, + EthApi, >; fn deref(&self) -> &Self::Target { @@ -185,12 +188,13 @@ impl Clone for RpcRegistry { } } -/// Helper container to encapsulate [`RethModuleRegistry`], [`TransportRpcModules`] and +/// Helper container to encapsulate [`RpcRegistryInner`], [`TransportRpcModules`] and /// [`AuthRpcModule`]. 
/// /// This can be used to access installed modules, or create commonly used handlers like -/// [`reth_rpc::EthApi`], and ultimately merge additional rpc handler into the configured transport -/// modules [`TransportRpcModules`] as well as configured authenticated methods [`AuthRpcModule`]. +/// [`reth_rpc::eth::EthApi`], and ultimately merge additional rpc handler into the configured +/// transport modules [`TransportRpcModules`] as well as configured authenticated methods +/// [`AuthRpcModule`]. #[allow(missing_debug_implementations)] pub struct RpcContext<'a, Node: FullNodeComponents> { /// The node components. @@ -201,7 +205,7 @@ pub struct RpcContext<'a, Node: FullNodeComponents> { /// A Helper type the holds instances of the configured modules. /// - /// This provides easy access to rpc handlers, such as [`RethModuleRegistry::eth_api`]. + /// This provides easy access to rpc handlers, such as [`RpcRegistryInner::eth_api`]. pub registry: &'a mut RpcRegistry, /// Holds installed modules per transport type. 
/// @@ -271,7 +275,7 @@ where .with_events(node.provider().clone()) .with_executor(node.task_executor().clone()) .with_evm_config(node.evm_config().clone()) - .build_with_auth_server(module_config, engine_api); + .build_with_auth_server(module_config, engine_api, EthApiBuild::build); let mut registry = RpcRegistry { registry }; let ctx = RpcContext { @@ -285,7 +289,8 @@ where extend_rpc_modules.extend_rpc_modules(ctx)?; let server_config = config.rpc.rpc_server_config(); - let launch_rpc = modules.clone().start_server(server_config).map_ok(|handle| { + let cloned_modules = modules.clone(); + let launch_rpc = server_config.start(&cloned_modules).map_ok(|handle| { if let Some(path) = handle.ipc_endpoint() { info!(target: "reth::cli", %path, "RPC IPC server started"); } diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index b4a6fef16b64..294d7a8f68a0 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -13,11 +13,8 @@ use reth_network_p2p::{ bodies::{client::BodiesClient, downloader::BodyDownloader}, headers::{client::HeadersClient, downloader::HeaderDownloader}, }; -use reth_node_core::{ - node_config::NodeConfig, - primitives::{BlockNumber, B256}, -}; -use reth_provider::{HeaderSyncMode, ProviderFactory}; +use reth_node_core::primitives::{BlockNumber, B256}; +use reth_provider::ProviderFactory; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; @@ -27,8 +24,7 @@ use tokio::sync::watch; /// Constructs a [Pipeline] that's wired to the network #[allow(clippy::too_many_arguments)] -pub async fn build_networked_pipeline( - node_config: &NodeConfig, +pub fn build_networked_pipeline( config: &StageConfig, client: Client, consensus: Arc, @@ -56,7 +52,6 @@ where .into_task_with(task_executor); let pipeline = build_pipeline( - node_config, provider_factory, config, header_downloader, @@ -68,16 +63,14 @@ 
where static_file_producer, executor, exex_manager_handle, - ) - .await?; + )?; Ok(pipeline) } /// Builds the [Pipeline] with the given [`ProviderFactory`] and downloaders. #[allow(clippy::too_many_arguments)] -pub async fn build_pipeline( - node_config: &NodeConfig, +pub fn build_pipeline( provider_factory: ProviderFactory, stage_config: &StageConfig, header_downloader: H, @@ -107,18 +100,13 @@ where let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default(); - let header_mode = if node_config.debug.continuous { - HeaderSyncMode::Continuous - } else { - HeaderSyncMode::Tip(tip_rx) - }; let pipeline = builder .with_tip_sender(tip_tx) .with_metrics_tx(metrics_tx.clone()) .add_stages( DefaultStages::new( provider_factory.clone(), - header_mode, + tip_rx, Arc::clone(&consensus), header_downloader, body_downloader, diff --git a/crates/node-core/Cargo.toml b/crates/node/core/Cargo.toml similarity index 88% rename from crates/node-core/Cargo.toml rename to crates/node/core/Cargo.toml index 27271cdc4702..997aacc6330c 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -12,7 +12,9 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true +reth-cli-util.workspace = true reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true @@ -20,31 +22,30 @@ reth-storage-errors.workspace = true reth-provider.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true -reth-rpc.workspace = true +reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-rpc-api = { workspace = true, features = ["client"] } +reth-rpc-eth-api = { workspace = true, features = ["client"] } reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true reth-discv4.workspace = true 
reth-discv5.workspace = true reth-net-nat.workspace = true -reth-engine-primitives.workspace = true +reth-network-peers.workspace = true reth-tasks.workspace = true reth-consensus-common.workspace = true -reth-beacon-consensus.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true # ethereum +alloy-genesis.workspace = true alloy-rpc-types-engine.workspace = true # async tokio.workspace = true -tokio-util.workspace = true -pin-project.workspace = true # metrics reth-metrics.workspace = true @@ -57,8 +58,7 @@ metrics-util.workspace = true eyre.workspace = true clap = { workspace = true, features = ["derive"] } humantime.workspace = true -thiserror.workspace = true -const-str = "0.5.6" +const_format.workspace = true rand.workspace = true derive_more.workspace = true once_cell.workspace = true @@ -66,7 +66,6 @@ once_cell.workspace = true # io dirs-next = "2.0.0" shellexpand = "3.0.0" -serde.workspace = true serde_json.workspace = true # http/rpc @@ -96,15 +95,14 @@ procfs = "0.16.0" [dev-dependencies] # test vectors generation proptest.workspace = true -reth-network-peers.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-rpc/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-beacon-consensus/optimism", + "reth-rpc-eth-api/optimism", + "reth-rpc-eth-types/optimism", ] jemalloc = ["dep:tikv-jemalloc-ctl"] diff --git a/crates/node-core/build.rs b/crates/node/core/build.rs similarity index 90% rename from crates/node-core/build.rs rename to crates/node/core/build.rs index 043505cdfb67..1a78793a4746 100644 --- a/crates/node-core/build.rs +++ b/crates/node/core/build.rs @@ -8,19 +8,20 @@ fn main() -> Result<(), Box> { EmitBuilder::builder() .git_describe(false, true, None) .git_dirty(true) - .git_sha(true) + .git_sha(false) .build_timestamp() .cargo_features() .cargo_target_triple() .emit_and_set()?; let sha = env::var("VERGEN_GIT_SHA")?; + let sha_short = &sha[0..7]; let is_dirty = 
env::var("VERGEN_GIT_DIRTY")? == "true"; // > git describe --always --tags // if not on a tag: v0.2.0-beta.3-82-g1939939b // if on a tag: v0.2.0-beta.3 - let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha}")); + let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha_short}")); let is_dev = is_dirty || not_on_tag; println!("cargo:rustc-env=RETH_VERSION_SUFFIX={}", if is_dev { "-dev" } else { "" }); Ok(()) diff --git a/crates/node/core/src/args/benchmark_args.rs b/crates/node/core/src/args/benchmark_args.rs new file mode 100644 index 000000000000..1ff49c9c84d1 --- /dev/null +++ b/crates/node/core/src/args/benchmark_args.rs @@ -0,0 +1,62 @@ +//! clap [Args](clap::Args) for benchmark configuration + +use clap::Args; +use std::path::PathBuf; + +/// Parameters for benchmark configuration +#[derive(Debug, Args, PartialEq, Eq, Default, Clone)] +#[command(next_help_heading = "Benchmark")] +pub struct BenchmarkArgs { + /// Run the benchmark from a specific block. + #[arg(long, verbatim_doc_comment)] + pub from: Option, + + /// Run the benchmark to a specific block. + #[arg(long, verbatim_doc_comment)] + pub to: Option, + + /// Path to a JWT secret to use for the authenticated engine-API RPC server. + /// + /// This will perform JWT authentication for all requests to the given engine RPC url. + /// + /// If no path is provided, a secret will be generated and stored in the datadir under + /// `

//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. + #[arg(long = "jwtsecret", value_name = "PATH", global = true, required = false)] + pub auth_jwtsecret: Option, + + /// The RPC url to use for sending engine requests. + #[arg( + long, + value_name = "ENGINE_RPC_URL", + verbatim_doc_comment, + default_value = "http://localhost:8551" + )] + pub engine_rpc_url: String, + + /// The path to the output directory for granular benchmark results. + #[arg(long, short, value_name = "BENCHMARK_OUTPUT", verbatim_doc_comment)] + pub output: Option, +} + +#[cfg(test)] +mod tests { + use super::*; + use clap::Parser; + + /// A helper type to parse Args more easily + #[derive(Parser)] + struct CommandParser { + #[command(flatten)] + args: T, + } + + #[test] + fn test_parse_benchmark_args() { + let default_args = BenchmarkArgs { + engine_rpc_url: "http://localhost:8551".to_string(), + ..Default::default() + }; + let args = CommandParser::::parse_from(["reth-bench"]).args; + assert_eq!(args, default_args); + } +} diff --git a/crates/node-core/src/args/database.rs b/crates/node/core/src/args/database.rs similarity index 100% rename from crates/node-core/src/args/database.rs rename to crates/node/core/src/args/database.rs diff --git a/crates/node-core/src/args/datadir_args.rs b/crates/node/core/src/args/datadir_args.rs similarity index 98% rename from crates/node-core/src/args/datadir_args.rs rename to crates/node/core/src/args/datadir_args.rs index 3d8dc490d292..85adc49a4aed 100644 --- a/crates/node-core/src/args/datadir_args.rs +++ b/crates/node/core/src/args/datadir_args.rs @@ -2,7 +2,7 @@ use crate::dirs::{ChainPath, DataDirPath, MaybePlatformPath}; use clap::Args; -use reth_primitives::Chain; +use reth_chainspec::Chain; use std::path::PathBuf; /// Parameters for datadir configuration diff --git a/crates/node-core/src/args/debug.rs b/crates/node/core/src/args/debug.rs similarity index 88% rename from crates/node-core/src/args/debug.rs rename to 
crates/node/core/src/args/debug.rs index b132eb3a611d..432bacf7d683 100644 --- a/crates/node-core/src/args/debug.rs +++ b/crates/node/core/src/args/debug.rs @@ -8,12 +8,6 @@ use std::path::PathBuf; #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] #[command(next_help_heading = "Debug")] pub struct DebugArgs { - /// Prompt the downloader to download blocks one at a time. - /// - /// NOTE: This is for testing purposes only. - #[arg(long = "debug.continuous", help_heading = "Debug", conflicts_with = "tip")] - pub continuous: bool, - /// Flag indicating whether the node should be terminated after the pipeline sync. #[arg(long = "debug.terminate", help_heading = "Debug")] pub terminate: bool, @@ -21,7 +15,7 @@ pub struct DebugArgs { /// Set the chain tip manually for testing purposes. /// /// NOTE: This is a temporary flag - #[arg(long = "debug.tip", help_heading = "Debug", conflicts_with = "continuous")] + #[arg(long = "debug.tip", help_heading = "Debug")] pub tip: Option, /// Runs the sync only up to the specified block. 
diff --git a/crates/node-core/src/args/dev.rs b/crates/node/core/src/args/dev.rs similarity index 100% rename from crates/node-core/src/args/dev.rs rename to crates/node/core/src/args/dev.rs diff --git a/crates/node-core/src/args/gas_price_oracle.rs b/crates/node/core/src/args/gas_price_oracle.rs similarity index 98% rename from crates/node-core/src/args/gas_price_oracle.rs rename to crates/node/core/src/args/gas_price_oracle.rs index 5148fdca34b2..abdd8e14214f 100644 --- a/crates/node-core/src/args/gas_price_oracle.rs +++ b/crates/node/core/src/args/gas_price_oracle.rs @@ -1,6 +1,6 @@ use crate::primitives::U256; use clap::Args; -use reth_rpc::eth::gas_oracle::GasPriceOracleConfig; +use reth_rpc_eth_types::GasPriceOracleConfig; use reth_rpc_server_types::constants::gas_oracle::{ DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, DEFAULT_MAX_GAS_PRICE, diff --git a/crates/node-core/src/args/log.rs b/crates/node/core/src/args/log.rs similarity index 98% rename from crates/node-core/src/args/log.rs rename to crates/node/core/src/args/log.rs index 9f77e555582b..aa2e0cf5f1a3 100644 --- a/crates/node-core/src/args/log.rs +++ b/crates/node/core/src/args/log.rs @@ -92,6 +92,8 @@ impl LogArgs { } /// Initializes tracing with the configured options from cli args. + /// + /// Returns the file worker guard, and the file name, if a file worker was configured. 
pub fn init_tracing(&self) -> eyre::Result> { let mut tracer = RethTracer::new(); diff --git a/crates/node-core/src/args/mod.rs b/crates/node/core/src/args/mod.rs similarity index 92% rename from crates/node-core/src/args/mod.rs rename to crates/node/core/src/args/mod.rs index 04fc02217d08..7d1f61903ffb 100644 --- a/crates/node-core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -24,9 +24,6 @@ pub use database::DatabaseArgs; mod log; pub use log::{ColorMode, LogArgs}; -mod secret_key; -pub use secret_key::{get_secret_key, SecretKeyError}; - /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; pub use payload_builder::PayloadBuilderArgs; @@ -55,6 +52,10 @@ pub use pruning::PruningArgs; mod datadir_args; pub use datadir_args::DatadirArgs; +/// BenchmarkArgs struct for configuring the benchmark to run +mod benchmark_args; +pub use benchmark_args::BenchmarkArgs; + pub mod utils; pub mod types; diff --git a/crates/node-core/src/args/network.rs b/crates/node/core/src/args/network.rs similarity index 78% rename from crates/node-core/src/args/network.rs rename to crates/node/core/src/args/network.rs index 93e4d8db1b12..39af9480d05b 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -2,10 +2,11 @@ use crate::version::P2P_CLIENT_VERSION; use clap::Args; +use reth_chainspec::ChainSpec; use reth_config::Config; -use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; +use reth_discv4::{NodeRecord, DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; use reth_discv5::{ - DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, + discv5::ListenConfig, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; use reth_net_nat::NatResolver; @@ -17,10 +18,10 @@ use reth_network::{ }, HelloMessageWithProtocols, NetworkConfigBuilder, SessionsConfig, }; -use reth_primitives::{mainnet_nodes, ChainSpec, 
TrustedPeer}; +use reth_network_peers::{mainnet_nodes, TrustedPeer}; use secp256k1::SecretKey; use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, ops::Not, path::PathBuf, sync::Arc, @@ -122,6 +123,12 @@ impl NetworkArgs { /// /// The `default_peers_file` will be used as the default location to store the persistent peers /// file if `no_persist_peers` is false, and there is no provided `peers_file`. + /// + /// Configured Bootnodes are prioritized, if unset, the chain spec bootnodes are used + /// Priority order for bootnodes configuration: + /// 1. --bootnodes flag + /// 2. Network preset flags (e.g. --holesky) + /// 3. default to mainnet nodes pub fn network_config( &self, config: &Config, @@ -129,7 +136,16 @@ impl NetworkArgs { secret_key: SecretKey, default_peers_file: PathBuf, ) -> NetworkConfigBuilder { - let chain_bootnodes = chain_spec.bootnodes().unwrap_or_else(mainnet_nodes); + let chain_bootnodes = self + .bootnodes + .clone() + .map(|bootnodes| { + bootnodes + .into_iter() + .filter_map(|trusted_peer| trusted_peer.resolve_blocking().ok()) + .collect() + }) + .unwrap_or_else(|| chain_spec.bootnodes().unwrap_or_else(mainnet_nodes)); let peers_file = self.peers_file.clone().unwrap_or(default_peers_file); // Configure peer connections @@ -172,23 +188,17 @@ impl NetworkArgs { // apply discovery settings .apply(|builder| { let rlpx_socket = (self.addr, self.port).into(); - self.discovery.apply_to_builder(builder, rlpx_socket) - }) - // modify discv5 settings if enabled in previous step - .map_discv5_config_builder(|builder| { - let DiscoveryArgs { - discv5_lookup_interval, - discv5_bootstrap_lookup_interval, - discv5_bootstrap_lookup_countdown, - .. 
- } = self.discovery; - - builder - .add_unsigned_boot_nodes(chain_bootnodes.into_iter()) - .lookup_interval(discv5_lookup_interval) - .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) - .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) + self.discovery.apply_to_builder(builder, rlpx_socket, chain_bootnodes) }) + .listener_addr(SocketAddr::new( + self.addr, // set discovery port based on instance number + self.port, + )) + .discovery_addr(SocketAddr::new( + self.discovery.addr, + // set discovery port based on instance number + self.discovery.port, + )) } /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. @@ -205,11 +215,30 @@ impl NetworkArgs { /// Sets the p2p and discovery ports to zero, allowing the OD to assign a random unused port /// when network components bind to sockets. - pub fn with_unused_ports(mut self) -> Self { + pub const fn with_unused_ports(mut self) -> Self { self = self.with_unused_p2p_port(); self.discovery = self.discovery.with_unused_discovery_port(); self } + + /// Change networking port numbers based on the instance number. + /// Ports are updated to `previous_value + instance - 1` + /// + /// # Panics + /// Warning: if `instance` is zero in debug mode, this will panic. 
+ pub fn adjust_instance_ports(&mut self, instance: u16) { + debug_assert_ne!(instance, 0, "instance must be non-zero"); + self.port += instance - 1; + self.discovery.adjust_instance_ports(instance); + } + + /// Resolve all trusted peers at once + pub async fn resolve_trusted_peers(&self) -> Result, std::io::Error> { + futures::future::try_join_all( + self.trusted_peers.iter().map(|peer| async move { peer.resolve().await }), + ) + .await + } } impl Default for NetworkArgs { @@ -308,6 +337,7 @@ impl DiscoveryArgs { &self, mut network_config_builder: NetworkConfigBuilder, rlpx_tcp_socket: SocketAddr, + boot_nodes: impl IntoIterator, ) -> NetworkConfigBuilder { if self.disable_discovery || self.disable_dns_discovery { network_config_builder = network_config_builder.disable_dns_discovery(); @@ -318,19 +348,72 @@ impl DiscoveryArgs { } if !self.disable_discovery && self.enable_discv5_discovery { - network_config_builder = - network_config_builder.discovery_v5(reth_discv5::Config::builder(rlpx_tcp_socket)); + network_config_builder = network_config_builder + .discovery_v5(self.discovery_v5_builder(rlpx_tcp_socket, boot_nodes)); } network_config_builder } + /// Creates a [`reth_discv5::ConfigBuilder`] filling it with the values from this struct. + pub fn discovery_v5_builder( + &self, + rlpx_tcp_socket: SocketAddr, + boot_nodes: impl IntoIterator, + ) -> reth_discv5::ConfigBuilder { + let Self { + discv5_addr, + discv5_addr_ipv6, + discv5_port, + discv5_port_ipv6, + discv5_lookup_interval, + discv5_bootstrap_lookup_interval, + discv5_bootstrap_lookup_countdown, + .. 
+ } = self; + + // Use rlpx address if none given + let discv5_addr_ipv4 = discv5_addr.or(match rlpx_tcp_socket { + SocketAddr::V4(addr) => Some(*addr.ip()), + SocketAddr::V6(_) => None, + }); + let discv5_addr_ipv6 = discv5_addr_ipv6.or(match rlpx_tcp_socket { + SocketAddr::V4(_) => None, + SocketAddr::V6(addr) => Some(*addr.ip()), + }); + + reth_discv5::Config::builder(rlpx_tcp_socket) + .discv5_config( + reth_discv5::discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( + discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, *discv5_port)), + discv5_addr_ipv6.map(|addr| SocketAddrV6::new(addr, *discv5_port_ipv6, 0, 0)), + )) + .build(), + ) + .add_unsigned_boot_nodes(boot_nodes) + .lookup_interval(*discv5_lookup_interval) + .bootstrap_lookup_interval(*discv5_bootstrap_lookup_interval) + .bootstrap_lookup_countdown(*discv5_bootstrap_lookup_countdown) + } + /// Set the discovery port to zero, to allow the OS to assign a random unused port when /// discovery binds to the socket. pub const fn with_unused_discovery_port(mut self) -> Self { self.port = 0; self } + + /// Change networking port numbers based on the instance number. + /// Ports are updated to `previous_value + instance - 1` + /// + /// # Panics + /// Warning: if `instance` is zero in debug mode, this will panic. 
+ pub fn adjust_instance_ports(&mut self, instance: u16) { + debug_assert_ne!(instance, 0, "instance must be non-zero"); + self.port += instance - 1; + self.discv5_port += instance - 1; + self.discv5_port_ipv6 += instance - 1; + } } impl Default for DiscoveryArgs { diff --git a/crates/node-core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs similarity index 97% rename from crates/node-core/src/args/payload_builder.rs rename to crates/node/core/src/args/payload_builder.rs index beb5b921ad32..69883fbf418d 100644 --- a/crates/node-core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -1,11 +1,9 @@ -use crate::{ - args::utils::parse_duration_from_secs, cli::config::PayloadBuilderConfig, - version::default_extradata, -}; +use crate::{cli::config::PayloadBuilderConfig, version::default_extradata}; use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; +use reth_cli_util::parse_duration_from_secs; use reth_primitives::constants::{ ETHEREUM_BLOCK_GAS_LIMIT, MAXIMUM_EXTRA_DATA_SIZE, SLOT_DURATION, }; diff --git a/crates/node-core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs similarity index 98% rename from crates/node-core/src/args/pruning.rs rename to crates/node/core/src/args/pruning.rs index dc3bdc0c7de8..5e20aef8d727 100644 --- a/crates/node-core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -1,8 +1,8 @@ //! 
Pruning and full node arguments use clap::Args; +use reth_chainspec::ChainSpec; use reth_config::config::PruneConfig; -use reth_primitives::ChainSpec; use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; /// Parameters for pruning and full node diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs similarity index 93% rename from crates/node-core/src/args/rpc_server.rs rename to crates/node/core/src/args/rpc_server.rs index 7ab2dd268fa1..761f0c3f709f 100644 --- a/crates/node-core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -1,22 +1,22 @@ //! clap [Args](clap::Args) for RPC related arguments. -use crate::args::{ - types::{MaxU32, ZeroAsNoneU64}, - GasPriceOracleArgs, RpcStateCacheArgs, +use std::{ + ffi::OsStr, + net::{IpAddr, Ipv4Addr}, + path::PathBuf, }; + use alloy_rpc_types_engine::JwtSecret; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use rand::Rng; -use reth_rpc::eth::RPC_DEFAULT_GAS_CAP; - use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection}; -use std::{ - ffi::OsStr, - net::{IpAddr, Ipv4Addr}, - path::PathBuf, + +use crate::args::{ + types::{MaxU32, ZeroAsNoneU64}, + GasPriceOracleArgs, RpcStateCacheArgs, }; /// Default max number of subscriptions per connection. @@ -152,10 +152,24 @@ pub struct RpcServerArgs { alias = "rpc-gascap", value_name = "GAS_CAP", value_parser = RangedU64ValueParser::::new().range(1..), - default_value_t = RPC_DEFAULT_GAS_CAP.into() + default_value_t = constants::gas_oracle::RPC_DEFAULT_GAS_CAP )] pub rpc_gas_cap: u64, + /// The maximum proof window for historical proof generation. + /// This value allows for generating historical proofs up to + /// configured number of blocks from current tip (up to `tip - window`). 
+ #[arg( + long = "rpc.eth-proof-window", + default_value_t = constants::DEFAULT_ETH_PROOF_WINDOW, + value_parser = RangedU64ValueParser::::new().range(..=constants::MAX_ETH_PROOF_WINDOW) + )] + pub rpc_eth_proof_window: u64, + + /// Maximum number of concurrent getproof requests. + #[arg(long = "rpc.proof-permits", alias = "rpc-proof-permits", value_name = "COUNT", default_value_t = constants::DEFAULT_PROOF_PERMITS)] + pub rpc_proof_permits: usize, + /// State cache configuration. #[command(flatten)] pub rpc_state_cache: RpcStateCacheArgs, @@ -285,9 +299,11 @@ impl Default for RpcServerArgs { rpc_max_tracing_requests: constants::default_max_tracing_requests(), rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(), rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(), - rpc_gas_cap: RPC_DEFAULT_GAS_CAP.into(), + rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP, + rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW, gas_price_oracle: GasPriceOracleArgs::default(), rpc_state_cache: RpcStateCacheArgs::default(), + rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS, } } } diff --git a/crates/node-core/src/args/rpc_state_cache.rs b/crates/node/core/src/args/rpc_state_cache.rs similarity index 100% rename from crates/node-core/src/args/rpc_state_cache.rs rename to crates/node/core/src/args/rpc_state_cache.rs diff --git a/crates/node-core/src/args/stage.rs b/crates/node/core/src/args/stage.rs similarity index 100% rename from crates/node-core/src/args/stage.rs rename to crates/node/core/src/args/stage.rs diff --git a/crates/node-core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs similarity index 100% rename from crates/node-core/src/args/txpool.rs rename to crates/node/core/src/args/txpool.rs diff --git a/crates/node-core/src/args/types.rs b/crates/node/core/src/args/types.rs similarity index 100% rename from crates/node-core/src/args/types.rs rename to crates/node/core/src/args/types.rs diff 
--git a/crates/node/core/src/args/utils.rs b/crates/node/core/src/args/utils.rs new file mode 100644 index 000000000000..064505ccb862 --- /dev/null +++ b/crates/node/core/src/args/utils.rs @@ -0,0 +1,81 @@ +//! Clap parser utilities + +use alloy_genesis::Genesis; +use reth_chainspec::ChainSpec; +use reth_fs_util as fs; +use std::{path::PathBuf, sync::Arc}; + +use reth_chainspec::DEV; + +#[cfg(feature = "optimism")] +use reth_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; + +#[cfg(not(feature = "optimism"))] +use reth_chainspec::{HOLESKY, MAINNET, SEPOLIA}; + +#[cfg(feature = "optimism")] +/// Chains supported by op-reth. First value should be used as the default. +pub const SUPPORTED_CHAINS: &[&str] = &["optimism", "optimism-sepolia", "base", "base-sepolia"]; +#[cfg(not(feature = "optimism"))] +/// Chains supported by reth. First value should be used as the default. +pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "holesky", "dev"]; + +/// The help info for the --chain flag +pub fn chain_help() -> String { + format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n {}", SUPPORTED_CHAINS.join(", ")) +} + +/// Clap value parser for [`ChainSpec`]s. +/// +/// The value parser matches either a known chain, the path +/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct. 
+pub fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { + Ok(match s { + #[cfg(not(feature = "optimism"))] + "mainnet" => MAINNET.clone(), + #[cfg(not(feature = "optimism"))] + "sepolia" => SEPOLIA.clone(), + #[cfg(not(feature = "optimism"))] + "holesky" => HOLESKY.clone(), + "dev" => DEV.clone(), + #[cfg(feature = "optimism")] + "optimism" => OP_MAINNET.clone(), + #[cfg(feature = "optimism")] + "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(), + #[cfg(feature = "optimism")] + "base" => BASE_MAINNET.clone(), + #[cfg(feature = "optimism")] + "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(), + _ => { + // try to read json from path first + let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) { + Ok(raw) => raw, + Err(io_err) => { + // valid json may start with "\n", but must contain "{" + if s.contains('{') { + s.to_string() + } else { + return Err(io_err.into()) // assume invalid path + } + } + }; + + // both serialized Genesis and ChainSpec structs supported + let genesis: Genesis = serde_json::from_str(&raw)?; + + Arc::new(genesis.into()) + } + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_known_chain_spec() { + for chain in SUPPORTED_CHAINS { + chain_value_parser(chain).unwrap(); + } + } +} diff --git a/crates/node-core/src/cli/config.rs b/crates/node/core/src/cli/config.rs similarity index 100% rename from crates/node-core/src/cli/config.rs rename to crates/node/core/src/cli/config.rs diff --git a/crates/node-core/src/cli/mod.rs b/crates/node/core/src/cli/mod.rs similarity index 100% rename from crates/node-core/src/cli/mod.rs rename to crates/node/core/src/cli/mod.rs diff --git a/crates/node-core/src/dirs.rs b/crates/node/core/src/dirs.rs similarity index 97% rename from crates/node-core/src/dirs.rs rename to crates/node/core/src/dirs.rs index e8ee5b83207f..a43350c2890c 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node/core/src/dirs.rs @@ -1,7 +1,7 @@ //! 
reth data directories. use crate::{args::DatadirArgs, utils::parse_path}; -use reth_primitives::Chain; +use reth_chainspec::Chain; use std::{ env::VarError, fmt::{Debug, Display, Formatter}, @@ -259,10 +259,10 @@ impl From for MaybePlatformPath { /// Wrapper type around `PlatformPath` that includes a `Chain`, used for separating reth data for /// different networks. /// -/// If the chain is either mainnet, goerli, or sepolia, then the path will be: +/// If the chain is either mainnet, sepolia, or holesky, then the path will be: /// * mainnet: `/mainnet` -/// * goerli: `/goerli` /// * sepolia: `/sepolia` +/// * holesky: `/holesky` /// /// Otherwise, the path will be dependent on the chain ID: /// * `/` @@ -383,10 +383,6 @@ mod tests { #[test] fn test_maybe_testnet_datadir_path() { - let path = MaybePlatformPath::::default(); - let path = path.unwrap_or_chain_default(Chain::goerli(), DatadirArgs::default()); - assert!(path.as_ref().ends_with("reth/goerli"), "{path:?}"); - let path = MaybePlatformPath::::default(); let path = path.unwrap_or_chain_default(Chain::holesky(), DatadirArgs::default()); assert!(path.as_ref().ends_with("reth/holesky"), "{path:?}"); diff --git a/crates/node-core/src/exit.rs b/crates/node/core/src/exit.rs similarity index 53% rename from crates/node-core/src/exit.rs rename to crates/node/core/src/exit.rs index 85adf6cb6607..5dc6e5638d80 100644 --- a/crates/node-core/src/exit.rs +++ b/crates/node/core/src/exit.rs @@ -1,32 +1,39 @@ //! Helper types for waiting for the node to exit. -use futures::FutureExt; -use reth_beacon_consensus::BeaconConsensusEngineError; +use futures::{future::BoxFuture, FutureExt}; use std::{ + fmt, future::Future, pin::Pin, task::{ready, Context, Poll}, }; -use tokio::sync::oneshot; /// A Future which resolves when the node exits -#[derive(Debug)] pub struct NodeExitFuture { - /// The receiver half of the channel for the consensus engine. - /// This can be used to wait for the consensus engine to exit. 
- consensus_engine_rx: Option>>, + /// The consensus engine future. + /// This can be polled to wait for the consensus engine to exit. + consensus_engine_fut: Option>>, /// Flag indicating whether the node should be terminated after the pipeline sync. terminate: bool, } +impl fmt::Debug for NodeExitFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NodeExitFuture") + .field("consensus_engine_fut", &"...") + .field("terminate", &self.terminate) + .finish() + } +} + impl NodeExitFuture { /// Create a new `NodeExitFuture`. - pub fn new( - consensus_engine_rx: oneshot::Receiver>, - terminate: bool, - ) -> Self { - Self { consensus_engine_rx: Some(consensus_engine_rx), terminate } + pub fn new(consensus_engine_fut: F, terminate: bool) -> Self + where + F: Future> + 'static + Send, + { + Self { consensus_engine_fut: Some(Box::pin(consensus_engine_fut)), terminate } } } @@ -35,18 +42,17 @@ impl Future for NodeExitFuture { fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); - if let Some(rx) = this.consensus_engine_rx.as_mut() { + if let Some(rx) = this.consensus_engine_fut.as_mut() { match ready!(rx.poll_unpin(cx)) { - Ok(res) => { - this.consensus_engine_rx.take(); - res?; + Ok(_) => { + this.consensus_engine_fut.take(); if this.terminate { Poll::Ready(Ok(())) } else { Poll::Pending } } - Err(err) => Poll::Ready(Err(err.into())), + Err(err) => Poll::Ready(Err(err)), } } else { Poll::Pending @@ -61,11 +67,9 @@ mod tests { #[tokio::test] async fn test_node_exit_future_terminate_true() { - let (tx, rx) = oneshot::channel::>(); + let fut = async { Ok(()) }; - let _ = tx.send(Ok(())); - - let node_exit_future = NodeExitFuture::new(rx, true); + let node_exit_future = NodeExitFuture::new(fut, true); let res = node_exit_future.await; @@ -74,11 +78,9 @@ mod tests { #[tokio::test] async fn test_node_exit_future_terminate_false() { - let (tx, rx) = oneshot::channel::>(); - - let _ = tx.send(Ok(())); + let fut 
= async { Ok(()) }; - let mut node_exit_future = NodeExitFuture::new(rx, false); + let mut node_exit_future = NodeExitFuture::new(fut, false); poll_fn(|cx| { assert!(node_exit_future.poll_unpin(cx).is_pending()); Poll::Ready(()) diff --git a/crates/node-core/src/lib.rs b/crates/node/core/src/lib.rs similarity index 93% rename from crates/node-core/src/lib.rs rename to crates/node/core/src/lib.rs index a8761110aeae..27a81cc26e7c 100644 --- a/crates/node-core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -11,7 +11,6 @@ pub mod args; pub mod cli; pub mod dirs; -pub mod engine; pub mod exit; pub mod metrics; pub mod node_config; @@ -39,12 +38,12 @@ pub mod rpc { } /// Re-exported from `reth_rpc::eth`. pub mod eth { - pub use reth_rpc::eth::*; + pub use reth_rpc_eth_api::*; } /// Re-exported from `reth_rpc::rpc`. pub mod result { - pub use reth_rpc::result::*; + pub use reth_rpc_server_types::result::*; } /// Re-exported from `reth_rpc::eth`. diff --git a/crates/node-core/src/metrics/mod.rs b/crates/node/core/src/metrics/mod.rs similarity index 100% rename from crates/node-core/src/metrics/mod.rs rename to crates/node/core/src/metrics/mod.rs diff --git a/crates/node-core/src/metrics/prometheus_exporter.rs b/crates/node/core/src/metrics/prometheus_exporter.rs similarity index 94% rename from crates/node-core/src/metrics/prometheus_exporter.rs rename to crates/node/core/src/metrics/prometheus_exporter.rs index 49b8d59420b4..6e01a2ccd299 100644 --- a/crates/node-core/src/metrics/prometheus_exporter.rs +++ b/crates/node/core/src/metrics/prometheus_exporter.rs @@ -1,7 +1,8 @@ //! 
Prometheus exporter -use crate::metrics::version_metrics::register_version_metrics; +use crate::metrics::version_metrics::VersionInfo; use eyre::WrapErr; +use futures::{future::FusedFuture, FutureExt}; use http::Response; use metrics::describe_gauge; use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; @@ -65,17 +66,14 @@ async fn start_endpoint( tokio::net::TcpListener::bind(listen_addr).await.wrap_err("Could not bind to address")?; task_executor.spawn_with_graceful_shutdown_signal(|signal| async move { - let mut shutdown = signal.ignore_guard(); + let mut shutdown = signal.ignore_guard().fuse(); loop { - let io = tokio::select! { - res = listener.accept() => match res { - Ok((stream, _remote_addr)) => stream, - Err(err) => { - tracing::error!(%err, "failed to accept connection"); - continue; - } - }, - _ = &mut shutdown => break, + let io = match listener.accept().await { + Ok((stream, _remote_addr)) => stream, + Err(err) => { + tracing::error!(%err, "failed to accept connection"); + continue; + } }; let handle = handle.clone(); @@ -89,7 +87,11 @@ async fn start_endpoint( if let Err(error) = jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown).await { - tracing::error!(%error, "metrics endpoint crashed") + tracing::debug!(%error, "failed to serve request") + } + + if shutdown.is_terminated() { + break; } } }); @@ -149,7 +151,7 @@ where process.describe(); describe_memory_stats(); describe_io_stats(); - register_version_metrics(); + VersionInfo::default().register_version_metrics(); Ok(()) } diff --git a/crates/node/core/src/metrics/version_metrics.rs b/crates/node/core/src/metrics/version_metrics.rs new file mode 100644 index 000000000000..03769d990f35 --- /dev/null +++ b/crates/node/core/src/metrics/version_metrics.rs @@ -0,0 +1,50 @@ +//! This exposes reth's version information over prometheus. 
+ +use crate::version::{BUILD_PROFILE_NAME, VERGEN_GIT_SHA}; +use metrics::gauge; + +/// Contains version information for the application. +#[derive(Debug, Clone)] +pub struct VersionInfo { + /// The version of the application. + pub version: &'static str, + /// The build timestamp of the application. + pub build_timestamp: &'static str, + /// The cargo features enabled for the build. + pub cargo_features: &'static str, + /// The Git SHA of the build. + pub git_sha: &'static str, + /// The target triple for the build. + pub target_triple: &'static str, + /// The build profile (e.g., debug or release). + pub build_profile: &'static str, +} + +impl Default for VersionInfo { + fn default() -> Self { + Self { + version: env!("CARGO_PKG_VERSION"), + build_timestamp: env!("VERGEN_BUILD_TIMESTAMP"), + cargo_features: env!("VERGEN_CARGO_FEATURES"), + git_sha: VERGEN_GIT_SHA, + target_triple: env!("VERGEN_CARGO_TARGET_TRIPLE"), + build_profile: BUILD_PROFILE_NAME, + } + } +} + +impl VersionInfo { + /// This exposes reth's version information over prometheus. 
+ pub fn register_version_metrics(&self) { + let labels: [(&str, &str); 6] = [ + ("version", self.version), + ("build_timestamp", self.build_timestamp), + ("cargo_features", self.cargo_features), + ("git_sha", self.git_sha), + ("target_triple", self.target_triple), + ("build_profile", self.build_profile), + ]; + + let _gauge = gauge!("info", &labels); + } +} diff --git a/crates/node-core/src/node_config.rs b/crates/node/core/src/node_config.rs similarity index 91% rename from crates/node-core/src/node_config.rs rename to crates/node/core/src/node_config.rs index bbed7be83ad1..f7bcf15073b3 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -11,12 +11,12 @@ use crate::{ }; use metrics_exporter_prometheus::PrometheusHandle; use once_cell::sync::Lazy; +use reth_chainspec::{ChainSpec, MAINNET}; use reth_config::config::PruneConfig; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_network_p2p::headers::client::HeadersClient; use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, kzg::KzgSettings, BlockHashOrNumber, - BlockNumber, ChainSpec, Head, SealedHeader, B256, MAINNET, + revm_primitives::EnvKzgSettings, BlockHashOrNumber, BlockNumber, Head, SealedHeader, B256, }; use reth_provider::{ providers::StaticFileProvider, BlockHashReader, HeaderProvider, ProviderFactory, @@ -155,12 +155,25 @@ impl NodeConfig { .with_unused_ports() } - /// Sets --dev mode for the node + /// Sets --dev mode for the node. + /// + /// In addition to setting the `--dev` flag, this also: + /// - disables discovery in [`NetworkArgs`]. pub const fn dev(mut self) -> Self { self.dev.dev = true; + self.network.discovery.disable_discovery = true; self } + /// Sets --dev mode for the node [`NodeConfig::dev`], if `dev` is true. 
+ pub const fn set_dev(self, dev: bool) -> Self { + if dev { + self.dev() + } else { + self + } + } + /// Set the data directory args for the node pub fn with_datadir_args(mut self, datadir_args: DatadirArgs) -> Self { self.datadir = datadir_args; @@ -239,27 +252,6 @@ impl NodeConfig { self } - /// Returns the initial pipeline target, based on whether or not the node is running in - /// `debug.tip` mode, `debug.continuous` mode, or neither. - /// - /// If running in `debug.tip` mode, the configured tip is returned. - /// Otherwise, if running in `debug.continuous` mode, the genesis hash is returned. - /// Otherwise, `None` is returned. This is what the node will do by default. - pub fn initial_pipeline_target(&self, genesis_hash: B256) -> Option { - if let Some(tip) = self.debug.tip { - // Set the provided tip as the initial pipeline target. - debug!(target: "reth::cli", %tip, "Tip manually set"); - Some(tip) - } else if self.debug.continuous { - // Set genesis as the initial pipeline target. - // This will allow the downloader to start - debug!(target: "reth::cli", "Continuous sync mode enabled"); - Some(genesis_hash) - } else { - None - } - } - /// Returns pruning configuration. pub fn prune_config(&self) -> Option { self.pruning.prune_config(&self.chain) @@ -287,9 +279,9 @@ impl NodeConfig { Ok(max_block) } - /// Loads '`MAINNET_KZG_TRUSTED_SETUP`' - pub fn kzg_settings(&self) -> eyre::Result> { - Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + /// Loads '`EnvKzgSettings::Default`' + pub const fn kzg_settings(&self) -> eyre::Result { + Ok(EnvKzgSettings::Default) } /// Installs the prometheus recorder. @@ -411,6 +403,7 @@ impl NodeConfig { /// [`RpcServerArgs::adjust_instance_ports`] method. 
pub fn adjust_instance_ports(&mut self) { self.rpc.adjust_instance_ports(self.instance); + self.network.adjust_instance_ports(self.instance); } /// Sets networking and RPC ports to zero, causing the OS to choose random unused ports when diff --git a/crates/node-core/src/utils.rs b/crates/node/core/src/utils.rs similarity index 68% rename from crates/node-core/src/utils.rs rename to crates/node/core/src/utils.rs index dd1e577a0c76..75672cd34968 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -2,23 +2,21 @@ //! blocks from the network. use eyre::Result; +use reth_chainspec::ChainSpec; use reth_consensus_common::validation::validate_block_pre_execution; -use reth_fs_util as fs; -use reth_network::NetworkManager; use reth_network_p2p::{ bodies::client::BodiesClient, - headers::client::{HeadersClient, HeadersRequest}, + headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, priority::Priority, }; -use reth_primitives::{BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader}; -use reth_provider::BlockReader; +use reth_primitives::{BlockHashOrNumber, SealedBlock, SealedHeader}; use reth_rpc_types::engine::{JwtError, JwtSecret}; use std::{ env::VarError, path::{Path, PathBuf}, sync::Arc, }; -use tracing::{debug, info, trace, warn}; +use tracing::{debug, info}; /// Parses a user-specified path with support for environment variables and common shorthands (e.g. /// ~ for the user's home directory). 
@@ -37,29 +35,6 @@ pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result(network: &NetworkManager, persistent_peers_file: Option) -where - C: BlockReader + Unpin, -{ - if let Some(file_path) = persistent_peers_file { - let known_peers = network.all_peers().collect::>(); - if let Ok(known_peers) = serde_json::to_string_pretty(&known_peers) { - trace!(target: "reth::cli", peers_file =?file_path, num_peers=%known_peers.len(), "Saving current peers"); - let parent_dir = file_path.parent().map(fs::create_dir_all).transpose(); - match parent_dir.and_then(|_| fs::write(&file_path, known_peers)) { - Ok(_) => { - info!(target: "reth::cli", peers_file=?file_path, "Wrote network peers to file"); - } - Err(err) => { - warn!(target: "reth::cli", %err, peers_file=?file_path, "Failed to write network peers to file"); - } - } - } - } -} - /// Get a single header from network pub async fn get_single_header( client: Client, diff --git a/crates/node-core/src/version.rs b/crates/node/core/src/version.rs similarity index 78% rename from crates/node-core/src/version.rs rename to crates/node/core/src/version.rs index db8bf09d1fe5..adc922787189 100644 --- a/crates/node-core/src/version.rs +++ b/crates/node/core/src/version.rs @@ -11,8 +11,11 @@ pub const NAME_CLIENT: &str = "Reth"; /// The latest version from Cargo.toml. pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); -/// The short SHA of the latest commit. -pub const VERGEN_GIT_SHA: &str = env!("VERGEN_GIT_SHA"); +/// The full SHA of the latest commit. +pub const VERGEN_GIT_SHA_LONG: &str = env!("VERGEN_GIT_SHA"); + +/// The 8 character short SHA of the latest commit. +pub const VERGEN_GIT_SHA: &str = const_format::str_index!(VERGEN_GIT_SHA_LONG, ..8); /// The build timestamp. 
pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); @@ -27,11 +30,11 @@ pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); /// ```text /// 0.1.0 (defa64b2) /// ``` -pub const SHORT_VERSION: &str = concat!( +pub const SHORT_VERSION: &str = const_format::concatcp!( env!("CARGO_PKG_VERSION"), env!("RETH_VERSION_SUFFIX"), " (", - env!("VERGEN_GIT_SHA"), + VERGEN_GIT_SHA, ")" ); @@ -52,13 +55,13 @@ pub const SHORT_VERSION: &str = concat!( /// Build Features: jemalloc /// Build Profile: maxperf /// ``` -pub const LONG_VERSION: &str = const_str::concat!( +pub const LONG_VERSION: &str = const_format::concatcp!( "Version: ", env!("CARGO_PKG_VERSION"), env!("RETH_VERSION_SUFFIX"), "\n", "Commit SHA: ", - env!("VERGEN_GIT_SHA"), + VERGEN_GIT_SHA_LONG, "\n", "Build Timestamp: ", env!("VERGEN_BUILD_TIMESTAMP"), @@ -67,9 +70,23 @@ pub const LONG_VERSION: &str = const_str::concat!( env!("VERGEN_CARGO_FEATURES"), "\n", "Build Profile: ", - build_profile_name() + BUILD_PROFILE_NAME ); +pub(crate) const BUILD_PROFILE_NAME: &str = { + // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime + // We split on the path separator of the *host* machine, which may be different from + // `std::path::MAIN_SEPARATOR_STR`. + const OUT_DIR: &str = env!("OUT_DIR"); + let unix_parts = const_format::str_split!(OUT_DIR, '/'); + if unix_parts.len() >= 4 { + unix_parts[unix_parts.len() - 4] + } else { + let win_parts = const_format::str_split!(OUT_DIR, '\\'); + win_parts[win_parts.len() - 4] + } +}; + /// The version information for reth formatted for P2P (devp2p). 
/// /// - The latest version from Cargo.toml @@ -81,11 +98,11 @@ pub const LONG_VERSION: &str = const_str::concat!( /// reth/v{major}.{minor}.{patch}-{sha1}/{target} /// ``` /// e.g.: `reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin` -pub(crate) const P2P_CLIENT_VERSION: &str = concat!( +pub(crate) const P2P_CLIENT_VERSION: &str = const_format::concatcp!( "reth/v", env!("CARGO_PKG_VERSION"), "-", - env!("VERGEN_GIT_SHA"), + VERGEN_GIT_SHA, "/", env!("VERGEN_CARGO_TARGET_TRIPLE") ); @@ -113,16 +130,6 @@ pub fn default_client_version() -> ClientVersion { } } -pub(crate) const fn build_profile_name() -> &'static str { - // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime - // We split on the path separator of the *host* machine, which may be different from - // `std::path::MAIN_SEPARATOR_STR`. - const OUT_DIR: &str = env!("OUT_DIR"); - const SEP: char = if const_str::contains!(OUT_DIR, "/") { '/' } else { '\\' }; - let parts = const_str::split!(OUT_DIR, SEP); - parts[parts.len() - 4] -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 4ae2a6f7683f..d592d25f3902 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -20,7 +20,10 @@ reth-prune.workspace = true reth-static-file.workspace = true reth-db-api.workspace = true reth-primitives.workspace = true -reth-rpc-types.workspace = true +reth-primitives-traits.workspace = true + +# alloy +alloy-rpc-types-engine.workspace = true # async tokio.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 41205ebe9b16..e7d33a7bcb0a 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -1,16 +1,17 @@ //! Support for handling events emitted by node components. 
use crate::cl::ConsensusLayerHealthEvent; +use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; use reth_beacon_consensus::{ BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, }; use reth_db_api::{database::Database, database_metrics::DatabaseMetadata}; -use reth_network::{NetworkEvent, NetworkHandle}; +use reth_network::NetworkEvent; use reth_network_api::PeersInfo; use reth_primitives::{constants, BlockNumber, B256}; +use reth_primitives_traits::{format_gas, format_gas_throughput}; use reth_prune::PrunerEvent; -use reth_rpc_types::engine::ForkchoiceState; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; use reth_static_file::StaticFileProducerEvent; use std::{ @@ -35,8 +36,8 @@ struct NodeState { /// Used for freelist calculation reported in the "Status" log message. /// See [`EventHandler::poll`]. db: DB, - /// Connection to the network. - network: Option, + /// Information about connected peers. + peers_info: Option>, /// The stage currently being executed. current_stage: Option, /// The latest block reached by either pipeline or consensus engine. 
@@ -54,12 +55,12 @@ struct NodeState { impl NodeState { const fn new( db: DB, - network: Option, + peers_info: Option>, latest_block: Option, ) -> Self { Self { db, - network, + peers_info, current_stage: None, latest_block, latest_block_time: None, @@ -70,7 +71,7 @@ impl NodeState { } fn num_connected_peers(&self) -> usize { - self.network.as_ref().map(|net| net.num_connected_peers()).unwrap_or_default() + self.peers_info.as_ref().map(|info| info.num_connected_peers()).unwrap_or_default() } /// Processes an event emitted by the pipeline @@ -279,8 +280,8 @@ impl NodeState { hash=?block.hash(), peers=self.num_connected_peers(), txs=block.body.len(), - mgas=%format!("{:.3}MGas", block.header.gas_used as f64 / constants::MGAS_TO_GAS as f64), - mgas_throughput=%format!("{:.3}MGas/s", block.header.gas_used as f64 / elapsed.as_secs_f64() / constants::MGAS_TO_GAS as f64), + gas=%format_gas(block.header.gas_used), + gas_throughput=%format_gas_throughput(block.header.gas_used, elapsed), full=%format!("{:.1}%", block.header.gas_used as f64 * 100.0 / block.header.gas_limit as f64), base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / constants::GWEI_TO_WEI as f64), blobs=block.header.blob_gas_used.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, @@ -437,7 +438,7 @@ impl From for NodeEvent { /// Displays relevant information to the user from components of the node, and periodically /// displays the high-level status of the node. 
pub async fn handle_events( - network: Option, + peers_info: Option>, latest_block_number: Option, events: E, db: DB, @@ -445,7 +446,7 @@ pub async fn handle_events( E: Stream + Unpin, DB: DatabaseMetadata + Database + 'static, { - let state = NodeState::new(db, network, latest_block_number); + let state = NodeState::new(db, peers_info, latest_block_number); let start = tokio::time::Instant::now() + Duration::from_secs(3); let mut info_interval = tokio::time::interval_at(start, INFO_MESSAGE_INTERVAL); diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml new file mode 100644 index 000000000000..b5cd12c33893 --- /dev/null +++ b/crates/optimism/cli/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "reth-optimism-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-static-file-types = { workspace = true, features = ["clap"] } +clap = { workspace = true, features = ["derive", "env"] } +reth-cli-commands.workspace = true +reth-consensus.workspace = true +reth-db = { workspace = true, features = ["mdbx"] } +reth-db-api.workspace = true +reth-downloaders.workspace = true +reth-optimism-primitives.workspace = true +reth-provider.workspace = true +reth-prune.workspace = true +reth-stages.workspace = true +reth-static-file.workspace = true +reth-execution-types.workspace = true +reth-node-core.workspace = true +reth-primitives.workspace = true + + +reth-stages-types.workspace = true +reth-node-events.workspace = true +reth-network-p2p.workspace = true +reth-errors.workspace = true + +reth-config.workspace = true +alloy-primitives.workspace = true +futures-util.workspace = true +reth-evm-optimism.workspace = true + + + +tokio = { workspace = true, features = [ + "sync", + "macros", + "time", + "rt-multi-thread", +] } +tracing.workspace = true +eyre.workspace = true + +[features] + optimism 
= [ + "reth-primitives/optimism", + "reth-evm-optimism/optimism", + ] \ No newline at end of file diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs new file mode 100644 index 000000000000..29761d0f7e49 --- /dev/null +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -0,0 +1,96 @@ +use alloy_primitives::B256; +use futures_util::{Stream, StreamExt}; +use reth_config::Config; +use reth_consensus::Consensus; +use reth_db_api::database::Database; +use reth_downloaders::{ + bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient, + headers::reverse_headers::ReverseHeadersDownloaderBuilder, +}; +use reth_errors::ProviderError; +use reth_evm_optimism::OpExecutorProvider; +use reth_network_p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, +}; +use reth_node_events::node::NodeEvent; +use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderFactory}; +use reth_prune::PruneModes; +use reth_stages::{sets::DefaultStages, Pipeline, StageSet}; +use reth_stages_types::StageId; +use reth_static_file::StaticFileProducer; +use std::sync::Arc; +use tokio::sync::watch; + +/// Builds import pipeline. +/// +/// If configured to execute, all stages will run. Otherwise, only stages that don't require state +/// will run. +pub async fn build_import_pipeline( + config: &Config, + provider_factory: ProviderFactory, + consensus: &Arc, + file_client: Arc, + static_file_producer: StaticFileProducer, + disable_exec: bool, +) -> eyre::Result<(Pipeline, impl Stream)> +where + DB: Database + Clone + Unpin + 'static, + C: Consensus + 'static, +{ + if !file_client.has_canonical_blocks() { + eyre::bail!("unable to import non canonical blocks"); + } + + // Retrieve latest header found in the database. + let last_block_number = provider_factory.last_block_number()?; + let local_head = provider_factory + .sealed_header(last_block_number)? 
+ .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; + + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) + .build(file_client.clone(), consensus.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + header_downloader.update_local_head(local_head); + header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); + + let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) + .build(file_client.clone(), consensus.clone(), provider_factory.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + body_downloader + .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) + .expect("failed to set download range"); + + let (tip_tx, tip_rx) = watch::channel(B256::ZERO); + let executor = OpExecutorProvider::optimism(provider_factory.chain_spec()); + + let max_block = file_client.max_block().unwrap_or(0); + + let pipeline = Pipeline::builder() + .with_tip_sender(tip_tx) + // we want to sync all blocks the file client provides or 0 if empty + .with_max_block(max_block) + .add_stages( + DefaultStages::new( + provider_factory.clone(), + tip_rx, + consensus.clone(), + header_downloader, + body_downloader, + executor, + config.stages.clone(), + PruneModes::default(), + ) + .builder() + .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec), + ) + .build(provider_factory, static_file_producer); + + let events = pipeline.events().map(Into::into); + + Ok((pipeline, events)) +} diff --git a/bin/reth/src/commands/import_op.rs b/crates/optimism/cli/src/commands/import.rs similarity index 96% rename from bin/reth/src/commands/import_op.rs rename to crates/optimism/cli/src/commands/import.rs index 3e2cae23c1d9..b1096dda2154 100644 
--- a/bin/reth/src/commands/import_op.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -1,28 +1,24 @@ //! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a //! file. - -use crate::{ - commands::{ - common::{AccessRights, Environment, EnvironmentArgs}, - import::build_import_pipeline, - }, - version::SHORT_VERSION, -}; use clap::Parser; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_consensus::noop::NoopConsensus; use reth_db::tables; use reth_db_api::transaction::DbTx; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; +use reth_node_core::version::SHORT_VERSION; use reth_optimism_primitives::bedrock_import::is_dup_tx; use reth_provider::StageCheckpointReader; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::StageId; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tracing::{debug, error, info}; +use crate::commands::build_pipeline::build_import_pipeline; + /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] pub struct ImportOpCommand { diff --git a/bin/reth/src/commands/import_receipts_op.rs b/crates/optimism/cli/src/commands/import_receipts.rs similarity index 91% rename from bin/reth/src/commands/import_receipts_op.rs rename to crates/optimism/cli/src/commands/import_receipts.rs index cad0aa64cd32..f400681b5afb 100644 --- a/bin/reth/src/commands/import_receipts_op.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -1,22 +1,25 @@ //! Command that imports OP mainnet receipts from Bedrock datadir, exported via //! . 
-use crate::commands::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_db::tables; use reth_db_api::{database::Database, transaction::DbTx}; use reth_downloaders::{ file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, + file_codec_ovm_receipt::HackReceiptFileCodec, receipt_file_client::ReceiptFileClient, }; +use reth_execution_types::ExecutionOutcome; use reth_node_core::version::SHORT_VERSION; use reth_optimism_primitives::bedrock_import::is_dup_tx; -use reth_primitives::{Receipts, StaticFileSegment}; +use reth_primitives::Receipts; use reth_provider::{ - ExecutionOutcome, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, }; use reth_stages::StageId; +use reth_static_file_types::StaticFileSegment; use std::path::{Path, PathBuf}; use tracing::{debug, error, info, trace}; @@ -114,10 +117,16 @@ where // open file let mut reader = ChunkedFileReader::new(path, chunk_len).await?; - while let Some(file_client) = reader.next_chunk::().await? { + while let Some(file_client) = + reader.next_chunk::>().await? + { // create a new file client from chunk read from file - let ReceiptFileClient { mut receipts, first_block, total_receipts: total_receipts_chunk } = - file_client; + let ReceiptFileClient { + mut receipts, + first_block, + total_receipts: total_receipts_chunk, + .. + } = file_client; // mark these as decoded total_decoded_receipts += total_receipts_chunk; diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs new file mode 100644 index 000000000000..373e7802cd4a --- /dev/null +++ b/crates/optimism/cli/src/commands/mod.rs @@ -0,0 +1,4 @@ +/// Helper function to build an import pipeline. 
+pub mod build_pipeline; +pub mod import; +pub mod import_receipts; diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs new file mode 100644 index 000000000000..67d0ccd6176c --- /dev/null +++ b/crates/optimism/cli/src/lib.rs @@ -0,0 +1,15 @@ +//! OP-Reth CLI implementation. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] + +/// Optimism CLI commands. +pub mod commands; +pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 56e67ebca66b..bd538a167f10 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-consensus-common.workspace = true +reth-chainspec.workspace = true reth-primitives.workspace = true reth-consensus.workspace = true diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 745b639c774b..1f27ec56554e 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,12 +9,16 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] +use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ - validate_block_pre_execution, validate_header_extradata, validate_header_standalone, + validate_against_parent_4844, validate_against_parent_eip1559_base_fee, + validate_against_parent_hash_number, validate_against_parent_timestamp, + validate_block_pre_execution, validate_header_base_fee, validate_header_extradata, + validate_header_gas, }; use reth_primitives::{ - BlockWithSenders, ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, + BlockWithSenders, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; @@ -44,8 +48,8 @@ impl OptimismBeaconConsensus { impl Consensus for OptimismBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_standalone(header, &self.chain_spec)?; - Ok(()) + validate_header_gas(header)?; + validate_header_base_fee(header, &self.chain_spec) } fn validate_header_against_parent( @@ -53,7 +57,19 @@ impl Consensus for OptimismBeaconConsensus { header: &SealedHeader, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - header.validate_against_parent(parent, &self.chain_spec).map_err(ConsensusError::from)?; + validate_against_parent_hash_number(header, parent)?; + + if self.chain_spec.is_bedrock_active_at_block(header.number) { + validate_against_parent_timestamp(header, parent)?; + } + + validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + + // ensure that the blob gas fields for this block + if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + validate_against_parent_4844(header, parent)?; + } + Ok(()) } diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 1df1ed0875b8..f36becbf6828 100644 --- 
a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,7 +1,8 @@ +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ gas_spent_by_transactions, proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, - ChainSpec, GotExpected, Receipt, B256, + GotExpected, Receipt, B256, }; /// Validate a block with regard to execution results: diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f64a341a9791..f53293edeebf 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -12,11 +12,13 @@ workspace = true [dependencies] # Reth +reth-chainspec.workspace = true +reth-ethereum-forks.workspace = true reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true reth-execution-errors.workspace = true -reth-provider.workspace = true +reth-execution-types.workspace = true reth-prune-types.workspace = true reth-consensus-common.workspace = true @@ -37,6 +39,7 @@ reth-revm = { workspace = true, features = ["test-utils"] } [features] optimism = [ "reth-primitives/optimism", - "reth-provider/optimism", + "reth-execution-types/optimism", "reth-optimism-consensus/optimism", + "reth-revm/optimism", ] diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs new file mode 100644 index 000000000000..1cdc917eac85 --- /dev/null +++ b/crates/optimism/evm/src/config.rs @@ -0,0 +1,133 @@ +use reth_chainspec::{ChainSpec, OptimismHardfork}; +use reth_ethereum_forks::{EthereumHardfork, Head}; + +/// Returns the spec id at the given timestamp. +/// +/// Note: This is only intended to be used after the merge, when hardforks are activated by +/// timestamp. 
+pub fn revm_spec_by_timestamp_after_bedrock( + chain_spec: &ChainSpec, + timestamp: u64, +) -> revm_primitives::SpecId { + if chain_spec.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) { + revm_primitives::FJORD + } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) { + revm_primitives::ECOTONE + } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp) { + revm_primitives::CANYON + } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(timestamp) { + revm_primitives::REGOLITH + } else { + revm_primitives::BEDROCK + } +} + +/// return `revm_spec` from spec configuration. +pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecId { + if chain_spec.fork(OptimismHardfork::Fjord).active_at_head(block) { + revm_primitives::FJORD + } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_head(block) { + revm_primitives::ECOTONE + } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_head(block) { + revm_primitives::CANYON + } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_head(block) { + revm_primitives::REGOLITH + } else if chain_spec.fork(OptimismHardfork::Bedrock).active_at_head(block) { + revm_primitives::BEDROCK + } else if chain_spec.fork(EthereumHardfork::Prague).active_at_head(block) { + revm_primitives::PRAGUE + } else if chain_spec.fork(EthereumHardfork::Cancun).active_at_head(block) { + revm_primitives::CANCUN + } else if chain_spec.fork(EthereumHardfork::Shanghai).active_at_head(block) { + revm_primitives::SHANGHAI + } else if chain_spec.fork(EthereumHardfork::Paris).active_at_head(block) { + revm_primitives::MERGE + } else if chain_spec.fork(EthereumHardfork::London).active_at_head(block) { + revm_primitives::LONDON + } else if chain_spec.fork(EthereumHardfork::Berlin).active_at_head(block) { + revm_primitives::BERLIN + } else if chain_spec.fork(EthereumHardfork::Istanbul).active_at_head(block) { + 
revm_primitives::ISTANBUL + } else if chain_spec.fork(EthereumHardfork::Petersburg).active_at_head(block) { + revm_primitives::PETERSBURG + } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_head(block) { + revm_primitives::BYZANTIUM + } else if chain_spec.fork(EthereumHardfork::SpuriousDragon).active_at_head(block) { + revm_primitives::SPURIOUS_DRAGON + } else if chain_spec.fork(EthereumHardfork::Tangerine).active_at_head(block) { + revm_primitives::TANGERINE + } else if chain_spec.fork(EthereumHardfork::Homestead).active_at_head(block) { + revm_primitives::HOMESTEAD + } else if chain_spec.fork(EthereumHardfork::Frontier).active_at_head(block) { + revm_primitives::FRONTIER + } else { + panic!( + "invalid hardfork chainspec: expected at least one hardfork, got {:?}", + chain_spec.hardforks + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_chainspec::ChainSpecBuilder; + + #[test] + fn test_revm_spec_by_timestamp_after_merge() { + #[inline(always)] + fn op_cs(f: impl FnOnce(ChainSpecBuilder) -> ChainSpecBuilder) -> ChainSpec { + let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)); + f(cs).build() + } + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.fjord_activated()), 0), + revm_primitives::FJORD + ); + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.ecotone_activated()), 0), + revm_primitives::ECOTONE + ); + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.canyon_activated()), 0), + revm_primitives::CANYON + ); + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.bedrock_activated()), 0), + revm_primitives::BEDROCK + ); + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.regolith_activated()), 0), + revm_primitives::REGOLITH + ); + } + + #[test] + fn test_to_revm_spec() { + #[inline(always)] + fn op_cs(f: impl FnOnce(ChainSpecBuilder) -> ChainSpecBuilder) -> ChainSpec { + let cs = 
ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)); + f(cs).build() + } + assert_eq!( + revm_spec(&op_cs(|cs| cs.fjord_activated()), &Head::default()), + revm_primitives::FJORD + ); + assert_eq!( + revm_spec(&op_cs(|cs| cs.ecotone_activated()), &Head::default()), + revm_primitives::ECOTONE + ); + assert_eq!( + revm_spec(&op_cs(|cs| cs.canyon_activated()), &Head::default()), + revm_primitives::CANYON + ); + assert_eq!( + revm_spec(&op_cs(|cs| cs.bedrock_activated()), &Head::default()), + revm_primitives::BEDROCK + ); + assert_eq!( + revm_spec(&op_cs(|cs| cs.regolith_activated()), &Head::default()), + revm_primitives::REGOLITH + ); + } +} diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index fd575250b4db..8f37f2554053 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,29 +1,28 @@ //! Optimism block executor. use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; +use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardfork}; use reth_evm::{ execute::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, + system_calls::apply_beacon_root_contract_call, ConfigureEvm, }; +use reth_execution_types::ExecutionOutcome; use reth_optimism_consensus::validate_block_post_execution; -use reth_primitives::{ - BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, Receipt, Receipts, TxType, - Withdrawals, U256, -}; -use reth_provider::ExecutionOutcome; +use reth_primitives::{BlockNumber, BlockWithSenders, Header, Receipt, Receipts, TxType, U256}; use reth_prune_types::PruneModes; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, + state_change::post_block_balance_increments, Evm, State, }; use 
revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, ResultAndState, }; use std::sync::Arc; use tracing::trace; @@ -44,7 +43,7 @@ impl OpExecutorProvider { impl OpExecutorProvider { /// Creates a new executor provider. - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { chain_spec, evm_config } } } @@ -55,7 +54,7 @@ where { fn op_executor(&self, db: DB) -> OpBlockExecutor where - DB: Database, + DB: Database + std::fmt::Display>, { OpBlockExecutor::new( self.chain_spec.clone(), @@ -69,24 +68,26 @@ impl BlockExecutorProvider for OpExecutorProvider where EvmConfig: ConfigureEvm, { - type Executor> = OpBlockExecutor; + type Executor + std::fmt::Display>> = + OpBlockExecutor; - type BatchExecutor> = OpBatchExecutor; + type BatchExecutor + std::fmt::Display>> = + OpBatchExecutor; fn executor(&self, db: DB) -> Self::Executor where - DB: Database, + DB: Database + std::fmt::Display>, { self.op_executor(db) } - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + std::fmt::Display>, { let executor = self.op_executor(db); OpBatchExecutor { executor, - batch_record: BlockBatchRecord::new(prune_modes), + batch_record: BlockBatchRecord::default(), stats: BlockExecutorStats::default(), } } @@ -118,10 +119,11 @@ where mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, u64), BlockExecutionError> where - DB: Database, + DB: Database + std::fmt::Display>, { // apply pre execution changes apply_beacon_root_contract_call( + &self.evm_config, &self.chain_spec, block.timestamp, block.number, @@ -131,7 +133,7 @@ where // execute transactions let is_regolith = - self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); + 
self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), @@ -175,14 +177,21 @@ where .transpose() .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); + self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. let ResultAndState { result, state } = evm.transact().map_err(move |err| { + let new_err = match err { + EVMError::Transaction(e) => EVMError::Transaction(e), + EVMError::Header(e) => EVMError::Header(e), + EVMError::Database(e) => EVMError::Database(e.into()), + EVMError::Custom(e) => EVMError::Custom(e), + EVMError::Precompile(e) => EVMError::Precompile(e), + }; // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { hash: transaction.recalculate_hash(), - error: err.into(), + error: Box::new(new_err), } })?; @@ -211,7 +220,7 @@ where // this is only set for post-Canyon deposit transactions. deposit_receipt_version: (transaction.is_deposit() && self.chain_spec - .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) + .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp)) .then_some(1), }); } @@ -236,7 +245,7 @@ pub struct OpBlockExecutor { impl OpBlockExecutor { /// Creates a new Ethereum block executor. - pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { + pub const fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { Self { executor: OpEvmExecutor { chain_spec, evm_config }, state } } @@ -255,7 +264,7 @@ impl OpBlockExecutor { impl OpBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, { /// Configures a new evm configuration and block environment for the given block. 
/// @@ -263,7 +272,7 @@ where fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); let mut block_env = BlockEnv::default(); - EvmConfig::fill_cfg_and_block_env( + self.executor.evm_config.fill_cfg_and_block_env( &mut cfg, &mut block_env, self.chain_spec(), @@ -315,16 +324,8 @@ where block: &BlockWithSenders, total_difficulty: U256, ) -> Result<(), BlockExecutionError> { - let balance_increments = post_block_balance_increments( - self.chain_spec(), - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); + let balance_increments = + post_block_balance_increments(self.chain_spec(), block, total_difficulty); // increment balances self.state .increment_balances(balance_increments) @@ -337,7 +338,7 @@ where impl Executor for OpBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = BlockExecutionOutput; @@ -394,7 +395,7 @@ impl OpBatchExecutor { impl BatchExecutor for OpBatchExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = ExecutionOutcome; @@ -436,6 +437,10 @@ where self.batch_record.set_tip(tip); } + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.batch_record.set_prune_modes(prune_modes); + } + fn size_hint(&self) -> Option { Some(self.executor.state.bundle_state.size_hint()) } @@ -444,9 +449,10 @@ where #[cfg(test)] mod tests { use super::*; + use reth_chainspec::ChainSpecBuilder; use reth_primitives::{ - b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, - Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, + b256, Account, Address, Block, 
Signature, StorageKey, StorageValue, Transaction, + TransactionSigned, TxEip1559, BASE_MAINNET, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, @@ -528,8 +534,7 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); @@ -610,8 +615,7 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 1f4c5e207f65..9e24634c12ef 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,8 +1,9 @@ //! 
Optimism-specific implementation and utilities for the executor use crate::OptimismBlockExecutionError; +use reth_chainspec::{ChainSpec, OptimismHardfork}; use reth_execution_errors::BlockExecutionError; -use reth_primitives::{address, b256, hex, Address, Block, Bytes, ChainSpec, Hardfork, B256, U256}; +use reth_primitives::{address, b256, hex, Address, Block, Bytes, B256, U256}; use revm::{ primitives::{Bytecode, HashMap, SpecId}, DatabaseCommit, L1BlockInfo, @@ -190,13 +191,14 @@ impl RethL1BlockInfo for L1BlockInfo { return Ok(U256::ZERO); } - let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, timestamp) { + let spec_id = if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, timestamp) + { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, timestamp) { SpecId::ECOTONE - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { return Err(OptimismBlockExecutionError::L1BlockInfoError { @@ -213,11 +215,12 @@ impl RethL1BlockInfo for L1BlockInfo { timestamp: u64, input: &[u8], ) -> Result { - let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, timestamp) { + let spec_id = if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, timestamp) + { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { + } else if 
chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { return Err(OptimismBlockExecutionError::L1BlockInfoError { @@ -244,8 +247,9 @@ where // previous block timestamp (heuristically, block time is not perfectly constant at 2s), and the // chain is an optimism chain, then we need to force-deploy the create2 deployer contract. if chain_spec.is_optimism() && - chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, timestamp) && - !chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, timestamp.saturating_sub(2)) + chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) && + !chain_spec + .is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp.saturating_sub(2)) { trace!(target: "evm", "Forcing create2 deployer contract deployment on Canyon transition"); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index be3897ef389e..8a56014c5688 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -9,14 +9,17 @@ // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] +use reth_chainspec::ChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ - revm::{config::revm_spec, env::fill_op_tx_env}, revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, ChainSpec, Head, Header, TransactionSigned, U256, + transaction::FillTxEnv, + Address, Head, Header, TransactionSigned, U256, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; +mod config; +pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock}; mod execute; pub use execute::*; pub mod l1; @@ -24,6 +27,7 @@ pub use l1::*; mod error; pub use error::OptimismBlockExecutionError; +use revm_primitives::{Bytes, Env, OptimismFields, TxKind}; /// Optimism-related EVM configuration. 
#[derive(Debug, Default, Clone, Copy)] @@ -31,13 +35,57 @@ pub use error::OptimismBlockExecutionError; pub struct OptimismEvmConfig; impl ConfigureEvmEnv for OptimismEvmConfig { - fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - fill_op_tx_env(tx_env, transaction, sender, buf.into()); + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + transaction.fill_tx_env(tx_env, sender); + } + + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ) { + env.tx = TxEnv { + caller, + transact_to: TxKind::Call(contract), + // Explicitly set nonce to None so revm does not do any nonce checks + nonce: None, + gas_limit: 30_000_000, + value: U256::ZERO, + data, + // Setting the gas price to zero enforces that no value is transferred as part of the + // call, and that the call will not count against the block's gas limit + gas_price: U256::ZERO, + // The chain ID check is not relevant here and is disabled if set to None + chain_id: None, + // Setting the gas priority fee to None ensures the effective gas price is derived from + // the `gas_price` field, which we need to be zero + gas_priority_fee: None, + access_list: Vec::new(), + // blob fields can be None for this tx + blob_hashes: Vec::new(), + max_fee_per_blob_gas: None, + authorization_list: None, + optimism: OptimismFields { + source_hash: None, + mint: None, + is_system_transaction: Some(false), + // The L1 fee is not charged for the EIP-4788 transaction, submit zero bytes for the + // enveloped tx size. 
+ enveloped_tx: Some(Bytes::default()), + }, + }; + + // ensure the block gas limit is >= the tx + env.block.gas_limit = U256::from(env.tx.gas_limit); + + // disable the base fee check for this call by setting the base fee to zero + env.block.basefee = U256::ZERO; } fn fill_cfg_env( + &self, cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, header: &Header, @@ -45,7 +93,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { ) { let spec_id = revm_spec( chain_spec, - Head { + &Head { number: header.number, timestamp: header.timestamp, difficulty: header.difficulty, @@ -98,7 +146,7 @@ mod tests { let chain_spec = ChainSpec::default(); let total_difficulty = U256::ZERO; - OptimismEvmConfig::fill_cfg_and_block_env( + OptimismEvmConfig::default().fill_cfg_and_block_env( &mut cfg_env, &mut block_env, &chain_spec, diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index f3ca7cf96b7e..1a32bcad6ec4 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -12,9 +12,12 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true +reth-auto-seal-consensus.workspace = true reth-basic-payload-builder.workspace = true +reth-consensus.workspace = true reth-optimism-payload-builder.workspace = true reth-rpc-types.workspace = true reth-rpc.workspace = true @@ -29,8 +32,11 @@ reth-evm.workspace = true reth-revm.workspace = true reth-evm-optimism.workspace = true reth-beacon-consensus.workspace = true +reth-optimism-consensus.workspace = true revm-primitives.workspace = true reth-discv5.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-eth-api.workspace = true # async async-trait.workspace = true @@ -40,22 +46,29 @@ tracing.workspace = true # misc clap.workspace = true serde.workspace = true -serde_json.workspace = true eyre.workspace = true parking_lot.workspace = true thiserror.workspace = true + +# rpc jsonrpsee.workspace = 
true +jsonrpsee-types.workspace = true +serde_json.workspace = true [dev-dependencies] reth.workspace = true reth-db.workspace = true -reth-revm = { workspace = true, features = ["test-utils"] } reth-e2e-test-utils.workspace = true +reth-node-builder = { workspace = true, features = ["test-utils"] } +reth-provider = { workspace = true, features = ["test-utils"] } +reth-revm = { workspace = true, features = ["test-utils"] } tokio.workspace = true alloy-primitives.workspace = true +alloy-genesis.workspace = true [features] optimism = [ + "reth-chainspec/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", @@ -63,4 +76,8 @@ optimism = [ "reth-evm-optimism/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", + "reth-revm/optimism", + "reth-auto-seal-consensus/optimism", + "reth-rpc-eth-types/optimism" ] +test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 061884257ce4..da16b53d5970 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -1,13 +1,13 @@ +use reth_chainspec::{ChainSpec, OptimismHardfork}; use reth_node_api::{ payload::{ validate_parent_beacon_block_root_presence, EngineApiMessageVersion, - EngineObjectValidationError, MessageValidationKind, PayloadOrAttributes, + EngineObjectValidationError, MessageValidationKind, PayloadOrAttributes, PayloadTypes, VersionSpecificValidationError, }, EngineTypes, }; use reth_optimism_payload_builder::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}; -use reth_primitives::{ChainSpec, Hardfork}; use reth_rpc_types::{ engine::{ ExecutionPayloadEnvelopeV2, OptimismExecutionPayloadEnvelopeV3, @@ -21,10 +21,13 @@ use reth_rpc_types::{ #[non_exhaustive] pub struct OptimismEngineTypes; -impl EngineTypes for OptimismEngineTypes { +impl PayloadTypes for OptimismEngineTypes { + type BuiltPayload = OptimismBuiltPayload; type 
PayloadAttributes = OptimismPayloadAttributes; type PayloadBuilderAttributes = OptimismPayloadBuilderAttributes; - type BuiltPayload = OptimismBuiltPayload; +} + +impl EngineTypes for OptimismEngineTypes { type ExecutionPayloadV1 = ExecutionPayloadV1; type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadV3 = OptimismExecutionPayloadEnvelopeV3; @@ -66,7 +69,7 @@ pub fn validate_withdrawals_presence( timestamp: u64, has_withdrawals: bool, ) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.fork(Hardfork::Canyon).active_at_timestamp(timestamp); + let is_shanghai = chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index c62cb598580d..2ea24da6754a 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -11,11 +11,13 @@ use reth_evm_optimism::{OpExecutorProvider, OptimismEvmConfig}; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_builder::{ components::{ - ComponentsBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, + PayloadServiceBuilder, PoolBuilder, }, node::{FullNodeTypes, NodeTypes}, BuilderContext, Node, PayloadBuilderConfig, }; +use reth_optimism_consensus::OptimismBeaconConsensus; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; @@ -23,6 +25,7 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, TransactionValidationTaskExecutor, }; +use std::sync::Arc; /// Type configuration for a regular Optimism node. 
#[derive(Debug, Default, Clone)] @@ -47,6 +50,7 @@ impl OptimismNode { OptimismPayloadBuilder, OptimismNetworkBuilder, OptimismExecutorBuilder, + OptimismConsensusBuilder, > where Node: FullNodeTypes, @@ -61,6 +65,7 @@ impl OptimismNode { )) .network(OptimismNetworkBuilder { disable_txpool_gossip }) .executor(OptimismExecutorBuilder::default()) + .consensus(OptimismConsensusBuilder::default()) } } @@ -74,6 +79,7 @@ where OptimismPayloadBuilder, OptimismNetworkBuilder, OptimismExecutorBuilder, + OptimismConsensusBuilder, >; fn components_builder(self) -> Self::ComponentsBuilder { @@ -277,12 +283,14 @@ where // purposefully disable discv4 .disable_discv4_discovery() // apply discovery settings - .apply(|builder| { + .apply(|mut builder| { let rlpx_socket = (args.addr, args.port).into(); - let mut builder = args.discovery.apply_to_builder(builder, rlpx_socket); if !args.discovery.disable_discovery { - builder = builder.discovery_v5(reth_discv5::Config::builder(rlpx_socket)); + builder = builder.discovery_v5(args.discovery.discovery_v5_builder( + rlpx_socket, + ctx.chain_spec().bootnodes().unwrap_or_default(), + )); } builder @@ -302,3 +310,23 @@ where Ok(handle) } } + +/// A basic optimism consensus builder. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct OptimismConsensusBuilder; + +impl ConsensusBuilder for OptimismConsensusBuilder +where + Node: FullNodeTypes, +{ + type Consensus = Arc; + + async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { + if ctx.is_dev() { + Ok(Arc::new(reth_auto_seal_consensus::AutoSealConsensus::new(ctx.chain_spec()))) + } else { + Ok(Arc::new(OptimismBeaconConsensus::new(ctx.chain_spec()))) + } + } +} diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 5ae1ba7b2538..d7c3f49efbc9 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -1,13 +1,12 @@ //! Helpers for optimism specific RPC implementations. 
-use jsonrpsee::types::ErrorObject; +use std::sync::{atomic::AtomicUsize, Arc}; + +use jsonrpsee_types::error::{ErrorObject, INTERNAL_ERROR_CODE}; use reqwest::Client; -use reth_rpc::eth::{ - error::{EthApiError, EthResult}, - traits::RawTransactionForwarder, -}; +use reth_rpc_eth_api::RawTransactionForwarder; +use reth_rpc_eth_types::error::{EthApiError, EthResult}; use reth_rpc_types::ToRpcError; -use std::sync::{atomic::AtomicUsize, Arc}; /// Error type when interacting with the Sequencer #[derive(Debug, thiserror::Error)] @@ -22,11 +21,7 @@ pub enum SequencerRpcError { impl ToRpcError for SequencerRpcError { fn to_rpc_error(&self) -> ErrorObject<'static> { - ErrorObject::owned( - jsonrpsee::types::error::INTERNAL_ERROR_CODE, - self.to_string(), - None::, - ) + ErrorObject::owned(INTERNAL_ERROR_CODE, self.to_string(), None::) } } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index eb3318893897..67a1b1f3df08 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -1,7 +1,8 @@ //! 
OP transaction pool types use parking_lot::RwLock; +use reth_chainspec::ChainSpec; use reth_evm_optimism::RethL1BlockInfo; -use reth_primitives::{Block, ChainSpec, GotExpected, InvalidTransactionError, SealedBlock}; +use reth_primitives::{Block, GotExpected, InvalidTransactionError, SealedBlock}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; use reth_revm::L1BlockInfo; use reth_transaction_pool::{ @@ -202,9 +203,10 @@ pub struct OpL1BlockInfo { #[cfg(test)] mod tests { use crate::txpool::OpTransactionValidator; + use reth_chainspec::MAINNET; use reth_primitives::{ Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxDeposit, TxKind, - MAINNET, U256, + U256, }; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index ad19086aeda6..feacabfb2c29 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,8 +1,10 @@ +use alloy_genesis::Genesis; use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use reth_chainspec::{ChainSpecBuilder, BASE_MAINNET}; use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; use reth_node_optimism::{OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, ChainSpecBuilder, Genesis, B256, BASE_MAINNET}; +use reth_primitives::{Address, B256}; use std::sync::Arc; use tokio::sync::Mutex; diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 41024c6126fc..6aaec1076779 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-revm.workspace = true reth-transaction-pool.workspace = true @@ -21,6 
+22,7 @@ reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-evm.workspace = true reth-evm-optimism.workspace = true +reth-execution-types.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-basic-payload-builder.workspace = true @@ -36,8 +38,10 @@ sha2.workspace = true [features] optimism = [ + "reth-chainspec/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-evm-optimism/optimism", -] \ No newline at end of file + "reth-revm/optimism", +] diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 7e839f9ee923..d00eecd60089 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -5,17 +5,16 @@ use crate::{ payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, }; use reth_basic_payload_builder::*; -use reth_evm::ConfigureEvm; +use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardfork}; +use reth_evm::{system_calls::pre_block_beacon_root_contract_call, ConfigureEvm}; +use reth_execution_types::ExecutionOutcome; use reth_payload_builder::error::PayloadBuilderError; use reth_primitives::{ constants::{BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS}, eip4844::calculate_excess_blob_gas, - proofs, - revm::env::tx_env_with_recovered, - Block, ChainSpec, Hardfork, Header, IntoRecoveredTransaction, Receipt, TxType, - EMPTY_OMMER_ROOT_HASH, U256, + proofs, Block, Header, IntoRecoveredTransaction, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, U256, }; -use reth_provider::{ExecutionOutcome, StateProviderFactory}; +use reth_provider::StateProviderFactory; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; use revm::{ @@ -126,11 +125,13 @@ where // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( &mut db, + &self.evm_config, &chain_spec, - block_number, 
&initialized_cfg, &initialized_block_env, - &attributes, + block_number, + attributes.payload_attributes.timestamp, + attributes.payload_attributes.parent_beacon_block_root, ) .map_err(|err| { warn!(target: "payload_builder", @@ -138,7 +139,7 @@ where %err, "failed to apply beacon root contract call for empty payload" ); - err + PayloadBuilderError::Internal(err.into()) })?; let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( @@ -279,18 +280,30 @@ where let block_number = initialized_block_env.number.to::(); - let is_regolith = chain_spec - .is_fork_active_at_timestamp(Hardfork::Regolith, attributes.payload_attributes.timestamp); + let is_regolith = chain_spec.is_fork_active_at_timestamp( + OptimismHardfork::Regolith, + attributes.payload_attributes.timestamp, + ); // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( &mut db, + &evm_config, &chain_spec, - block_number, &initialized_cfg, &initialized_block_env, - &attributes, - )?; + block_number, + attributes.payload_attributes.timestamp, + attributes.payload_attributes.parent_beacon_block_root, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + PayloadBuilderError::Internal(err.into()) + })?; // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), @@ -321,7 +334,7 @@ where } // Convert the transaction to a [TransactionSignedEcRecovered]. This is - // purely for the purposes of utilizing the [tx_env_with_recovered] function. + // purely for the purposes of utilizing the `evm_config.tx_env`` function. // Deposit transactions do not have signatures, so if the tx is a deposit, this // will just pull in its `from` address. 
let sequencer_tx = sequencer_tx.clone().try_into_ecrecovered().map_err(|_| { @@ -348,7 +361,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - tx_env_with_recovered(&sequencer_tx), + evm_config.tx_env(&sequencer_tx), ); let mut evm = evm_config.evm_with_env(&mut db, env); @@ -391,7 +404,7 @@ where // ensures this is only set for post-Canyon deposit transactions. deposit_receipt_version: chain_spec .is_fork_active_at_timestamp( - Hardfork::Canyon, + OptimismHardfork::Canyon, attributes.payload_attributes.timestamp, ) .then_some(1), @@ -427,7 +440,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - tx_env_with_recovered(&tx), + evm_config.tx_env(&tx), ); // Configure the environment for the block. diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 7f9bd1ce78ed..47db0d571ed8 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -3,13 +3,14 @@ //! 
Optimism builder support use alloy_rlp::Encodable; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_evm_optimism::revm_spec_by_timestamp_after_bedrock; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ - revm::config::revm_spec_by_timestamp_after_merge, revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}, - Address, BlobTransactionSidecar, ChainSpec, Header, SealedBlock, TransactionSigned, - Withdrawals, B256, U256, + Address, BlobTransactionSidecar, Header, SealedBlock, TransactionSigned, Withdrawals, B256, + U256, }; use reth_rpc_types::engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, OptimismExecutionPayloadEnvelopeV3, @@ -112,7 +113,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { let cfg = CfgEnv::default().with_chain_id(chain_spec.chain().id()); // ensure we're not missing any timestamp based hardforks - let spec_id = revm_spec_by_timestamp_after_merge(chain_spec, self.timestamp()); + let spec_id = revm_spec_by_timestamp_after_bedrock(chain_spec, self.timestamp()); // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is // cancun now, we need to set the excess blob gas to the default value @@ -179,7 +180,7 @@ pub struct OptimismBuiltPayload { impl OptimismBuiltPayload { /// Initializes the payload with the given initial block. 
- pub fn new( + pub const fn new( id: PayloadId, block: SealedBlock, fees: U256, diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml new file mode 100644 index 000000000000..f7599b4a07a3 --- /dev/null +++ b/crates/optimism/rpc/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "reth-optimism-rpc" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-errors.workspace = true +reth-evm.workspace = true +reth-rpc-eth-api.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-types.workspace = true +reth-chainspec.workspace = true +reth-provider.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } +reth-transaction-pool.workspace = true + +# ethereum +alloy-primitives.workspace = true + +# async +parking_lot.workspace = true +tokio.workspace = true \ No newline at end of file diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs new file mode 100644 index 000000000000..d580a30e96f4 --- /dev/null +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -0,0 +1,224 @@ +//! OP-Reth `eth_` endpoint implementation. 
+ +use alloy_primitives::{Address, U64}; +use reth_chainspec::ChainInfo; +use reth_errors::RethResult; +use reth_evm::ConfigureEvm; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProviderFactory, +}; +use reth_rpc_eth_api::{ + helpers::{ + Call, EthApiSpec, EthBlocks, EthCall, EthFees, EthSigner, EthState, EthTransactions, + LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, LoadState, LoadTransaction, + SpawnBlocking, Trace, + }, + RawTransactionForwarder, +}; +use reth_rpc_eth_types::{EthStateCache, PendingBlock}; +use reth_rpc_types::SyncStatus; +use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; +use reth_transaction_pool::TransactionPool; +use std::future::Future; +use tokio::sync::{AcquireError, Mutex, OwnedSemaphorePermit}; + +/// OP-Reth `Eth` API implementation. +/// +/// This type provides the functionality for handling `eth_` related requests. +/// +/// This wraps a default `Eth` implementation, and provides additional functionality where the +/// optimism spec deviates from the default (ethereum) spec, e.g. transaction forwarding to the +/// sequencer, receipts, additional RPC fields for transaction receipts. +/// +/// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented +/// all the `Eth` helper traits and prerequisite traits. +#[derive(Debug, Clone)] +pub struct OpEthApi { + inner: Eth, +} + +impl OpEthApi { + /// Creates a new `OpEthApi` from the provided `Eth` implementation. + pub const fn new(inner: Eth) -> Self { + Self { inner } + } +} + +impl EthApiSpec for OpEthApi { + fn protocol_version(&self) -> impl Future> + Send { + self.inner.protocol_version() + } + + fn chain_id(&self) -> U64 { + self.inner.chain_id() + } + + fn chain_info(&self) -> RethResult { + self.inner.chain_info() + } + + fn accounts(&self) -> Vec
{ + self.inner.accounts() + } + + fn is_syncing(&self) -> bool { + self.inner.is_syncing() + } + + fn sync_status(&self) -> RethResult { + self.inner.sync_status() + } +} + +impl LoadBlock for OpEthApi { + fn provider(&self) -> impl BlockReaderIdExt { + LoadBlock::provider(&self.inner) + } + + fn cache(&self) -> &reth_rpc_eth_types::EthStateCache { + self.inner.cache() + } +} + +impl LoadPendingBlock for OpEthApi { + fn provider( + &self, + ) -> impl BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory { + self.inner.provider() + } + + fn pool(&self) -> impl TransactionPool { + self.inner.pool() + } + + fn pending_block(&self) -> &Mutex> { + self.inner.pending_block() + } + + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} + +impl SpawnBlocking for OpEthApi { + fn io_task_spawner(&self) -> impl TaskSpawner { + self.inner.io_task_spawner() + } + + fn tracing_task_pool(&self) -> &BlockingTaskPool { + self.inner.tracing_task_pool() + } + + fn acquire_owned( + &self, + ) -> impl Future> + Send { + self.inner.acquire_owned() + } + + fn acquire_many_owned( + &self, + n: u32, + ) -> impl Future> + Send { + self.inner.acquire_many_owned(n) + } +} + +impl LoadReceipt for OpEthApi { + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} + +impl LoadFee for OpEthApi { + fn provider(&self) -> impl reth_provider::BlockIdReader + HeaderProvider + ChainSpecProvider { + LoadFee::provider(&self.inner) + } + + fn cache(&self) -> &EthStateCache { + LoadFee::cache(&self.inner) + } + + fn gas_oracle(&self) -> &reth_rpc_eth_types::GasPriceOracle { + self.inner.gas_oracle() + } + + fn fee_history_cache(&self) -> &reth_rpc_eth_types::FeeHistoryCache { + self.inner.fee_history_cache() + } +} + +impl Call for OpEthApi { + fn call_gas_limit(&self) -> u64 { + self.inner.call_gas_limit() + } + + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} + +impl LoadState for OpEthApi { + fn provider(&self) -> 
impl StateProviderFactory { + LoadState::provider(&self.inner) + } + + fn cache(&self) -> &EthStateCache { + LoadState::cache(&self.inner) + } + + fn pool(&self) -> impl TransactionPool { + LoadState::pool(&self.inner) + } +} + +impl LoadTransaction for OpEthApi { + type Pool = Eth::Pool; + + fn provider(&self) -> impl reth_provider::TransactionsProvider { + LoadTransaction::provider(&self.inner) + } + + fn cache(&self) -> &EthStateCache { + LoadTransaction::cache(&self.inner) + } + + fn pool(&self) -> &Self::Pool { + LoadTransaction::pool(&self.inner) + } +} + +impl EthTransactions for OpEthApi { + fn provider(&self) -> impl BlockReaderIdExt { + EthTransactions::provider(&self.inner) + } + + fn raw_tx_forwarder(&self) -> Option> { + self.inner.raw_tx_forwarder() + } + + fn signers(&self) -> &parking_lot::RwLock>> { + self.inner.signers() + } +} + +impl EthBlocks for OpEthApi { + fn provider(&self) -> impl HeaderProvider { + EthBlocks::provider(&self.inner) + } +} + +impl EthState for OpEthApi { + fn max_proof_window(&self) -> u64 { + self.inner.max_proof_window() + } +} + +impl EthCall for OpEthApi {} + +impl EthFees for OpEthApi {} + +impl Trace for OpEthApi { + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs new file mode 100644 index 000000000000..cad90bc42bf8 --- /dev/null +++ b/crates/optimism/rpc/src/lib.rs @@ -0,0 +1,11 @@ +//! OP-Reth RPC support. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod eth; diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 68464d7f459a..de9be0e35b98 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-revm.workspace = true reth-transaction-pool.workspace = true @@ -27,7 +28,7 @@ revm.workspace = true # async tokio = { workspace = true, features = ["sync", "time"] } -futures-core = "0.3" +futures-core.workspace = true futures-util.workspace = true # metrics diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 46952a5dc88c..eddff5a2b54e 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -11,6 +11,7 @@ use crate::metrics::PayloadBuilderMetrics; use futures_core::ready; use futures_util::FutureExt; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_payload_builder::{ database::CachedReads, error::PayloadBuilderError, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, @@ -18,20 +19,17 @@ use reth_payload_builder::{ use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ constants::{EMPTY_WITHDRAWALS, RETH_CLIENT_VERSION, SLOT_DURATION}, - proofs, BlockNumberOrTag, Bytes, ChainSpec, Request, SealedBlock, Withdrawals, B256, U256, + proofs, BlockNumberOrTag, Bytes, SealedBlock, Withdrawals, B256, U256, }; use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, }; -use 
reth_revm::state_change::{ - apply_beacon_root_contract_call, apply_withdrawal_requests_contract_call, - post_block_withdrawals_balance_increments, -}; +use reth_revm::state_change::post_block_withdrawals_balance_increments; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use revm::{ - primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, - Database, DatabaseCommit, Evm, State, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, + Database, State, }; use std::{ fmt, @@ -636,7 +634,7 @@ pub struct PendingPayload

{ impl

PendingPayload

{ /// Constructs a `PendingPayload` future. - pub fn new( + pub const fn new( cancel: Cancelled, payload: oneshot::Receiver, PayloadBuilderError>>, ) -> Self { @@ -773,7 +771,7 @@ pub struct BuildArguments { impl BuildArguments { /// Create new build arguments. - pub fn new( + pub const fn new( client: Client, pool: Pool, cached_reads: CachedReads, @@ -922,79 +920,6 @@ pub fn commit_withdrawals>( }) } -/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. -/// -/// This constructs a new [Evm] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the pre block contract call. -/// -/// The parent beacon block root used for the call is gathered from the given -/// [`PayloadBuilderAttributes`]. -/// -/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state -/// change. -pub fn pre_block_beacon_root_contract_call( - db: &mut DB, - chain_spec: &ChainSpec, - block_number: u64, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, - attributes: &Attributes, -) -> Result<(), PayloadBuilderError> -where - DB::Error: std::fmt::Display, - Attributes: PayloadBuilderAttributes, -{ - // apply pre-block EIP-4788 contract call - let mut evm_pre_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the pre block call needs the block itself - apply_beacon_root_contract_call( - chain_spec, - attributes.timestamp(), - block_number, - attributes.parent_beacon_block_root(), - &mut evm_pre_block, - ) - .map_err(|err| PayloadBuilderError::Internal(err.into())) -} - -/// Apply the [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) post block contract call. 
-/// -/// This constructs a new [Evm] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. -/// -/// This uses [`apply_withdrawal_requests_contract_call`] to ultimately calculate the -/// [requests](Request). -pub fn post_block_withdrawal_requests_contract_call( - db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, -) -> Result, PayloadBuilderError> -where - DB::Error: std::fmt::Display, -{ - // apply post-block EIP-7002 contract call - let mut evm_post_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the post block call needs the block itself - apply_withdrawal_requests_contract_call(&mut evm_post_block) - .map_err(|err| PayloadBuilderError::Internal(err.into())) -} - /// Checks if the new payload is better than the current best. /// /// This compares the total fees of the blocks, higher is better. 
diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index ce82ae7ff8a7..735831e41ca1 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -18,7 +18,6 @@ reth-rpc-types.workspace = true reth-transaction-pool.workspace = true reth-errors.workspace = true reth-provider.workspace = true -reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index 340a8510a105..03ca5084392d 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -35,7 +35,7 @@ use std::{ pub struct CachedReads { accounts: HashMap, contracts: HashMap, - block_hashes: HashMap, + block_hashes: HashMap, } // === impl CachedReads === @@ -114,7 +114,7 @@ impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { } } - fn block_hash(&mut self, number: U256) -> Result { + fn block_hash(&mut self, number: u64) -> Result { let code = match self.cached.block_hashes.entry(number) { Entry::Occupied(entry) => *entry.get(), Entry::Vacant(entry) => *entry.insert(self.db.block_hash_ref(number)?), @@ -148,7 +148,7 @@ impl<'a, DB: DatabaseRef> DatabaseRef for CachedReadsDBRef<'a, DB> { self.inner.borrow_mut().storage(address, index) } - fn block_hash_ref(&self, number: U256) -> Result { + fn block_hash_ref(&self, number: u64) -> Result { self.inner.borrow_mut().block_hash(number) } } diff --git a/crates/payload/builder/src/events.rs b/crates/payload/builder/src/events.rs index 4df81030fca8..271eb2267ec4 100644 --- a/crates/payload/builder/src/events.rs +++ b/crates/payload/builder/src/events.rs @@ -1,4 +1,4 @@ -use reth_engine_primitives::EngineTypes; +use reth_payload_primitives::PayloadTypes; use tokio::sync::broadcast; use tokio_stream::{ wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, @@ -7,7 +7,7 @@ use tokio_stream::{ 
/// Payload builder events. #[derive(Clone, Debug)] -pub enum Events { +pub enum Events { /// The payload attributes as /// they are received from the CL through the engine api. Attributes(Engine::PayloadBuilderAttributes), @@ -19,11 +19,11 @@ pub enum Events { /// Represents a receiver for various payload events. #[derive(Debug)] -pub struct PayloadEvents { +pub struct PayloadEvents { pub receiver: broadcast::Receiver>, } -impl PayloadEvents { +impl PayloadEvents { // Convert this receiver into a stream of PayloadEvents. pub fn into_stream(self) -> BroadcastStream> { BroadcastStream::new(self.receiver) diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs index 9cb1a8d66799..1ec9a1bacd55 100644 --- a/crates/payload/builder/src/noop.rs +++ b/crates/payload/builder/src/noop.rs @@ -2,8 +2,7 @@ use crate::{service::PayloadServiceCommand, PayloadBuilderHandle}; use futures_util::{ready, StreamExt}; -use reth_engine_primitives::EngineTypes; -use reth_payload_primitives::PayloadBuilderAttributes; +use reth_payload_primitives::{PayloadBuilderAttributes, PayloadTypes}; use std::{ future::Future, pin::Pin, @@ -14,14 +13,14 @@ use tokio_stream::wrappers::UnboundedReceiverStream; /// A service task that does not build any payloads. #[derive(Debug)] -pub struct NoopPayloadBuilderService { +pub struct NoopPayloadBuilderService { /// Receiver half of the command channel. command_rx: UnboundedReceiverStream>, } impl NoopPayloadBuilderService where - Engine: EngineTypes + 'static, + Engine: PayloadTypes + 'static, { /// Creates a new [`NoopPayloadBuilderService`]. 
pub fn new() -> (Self, PayloadBuilderHandle) { @@ -35,7 +34,7 @@ where impl Future for NoopPayloadBuilderService where - Engine: EngineTypes, + Engine: PayloadTypes, { type Output = (); diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index d0a39799cd49..236d0f64d6e9 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -11,8 +11,7 @@ use crate::{ KeepPayloadJobAlive, PayloadJob, }; use futures_util::{future::FutureExt, Stream, StreamExt}; -use reth_engine_primitives::EngineTypes; -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadTypes}; use reth_provider::CanonStateNotification; use reth_rpc_types::engine::PayloadId; use std::{ @@ -32,7 +31,7 @@ type PayloadFuture

= Pin { +pub struct PayloadStore { inner: PayloadBuilderHandle, } @@ -40,7 +39,7 @@ pub struct PayloadStore { impl PayloadStore where - Engine: EngineTypes + 'static, + Engine: PayloadTypes + 'static, { /// Resolves the payload job and returns the best payload that has been built so far. /// @@ -76,7 +75,7 @@ where impl Clone for PayloadStore where - Engine: EngineTypes, + Engine: PayloadTypes, { fn clone(&self) -> Self { Self { inner: self.inner.clone() } @@ -85,7 +84,7 @@ where impl From> for PayloadStore where - Engine: EngineTypes, + Engine: PayloadTypes, { fn from(inner: PayloadBuilderHandle) -> Self { Self { inner } @@ -96,7 +95,7 @@ where /// /// This is the API used to create new payloads and to get the current state of existing ones. #[derive(Debug)] -pub struct PayloadBuilderHandle { +pub struct PayloadBuilderHandle { /// Sender half of the message channel to the [`PayloadBuilderService`]. to_service: mpsc::UnboundedSender>, } @@ -105,7 +104,7 @@ pub struct PayloadBuilderHandle { impl PayloadBuilderHandle where - Engine: EngineTypes + 'static, + Engine: PayloadTypes + 'static, { /// Creates a new payload builder handle for the given channel. 
/// @@ -191,7 +190,7 @@ where impl Clone for PayloadBuilderHandle where - Engine: EngineTypes, + Engine: PayloadTypes, { fn clone(&self) -> Self { Self { to_service: self.to_service.clone() } @@ -210,7 +209,7 @@ where #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct PayloadBuilderService where - Engine: EngineTypes, + Engine: PayloadTypes, Gen: PayloadJobGenerator, Gen::Job: PayloadJob, { @@ -236,7 +235,7 @@ const PAYLOAD_EVENTS_BUFFER_SIZE: usize = 20; impl PayloadBuilderService where - Engine: EngineTypes + 'static, + Engine: PayloadTypes + 'static, Gen: PayloadJobGenerator, Gen::Job: PayloadJob, ::BuiltPayload: Into, @@ -327,7 +326,7 @@ where impl PayloadBuilderService where - Engine: EngineTypes, + Engine: PayloadTypes, Gen: PayloadJobGenerator, Gen::Job: PayloadJob, ::BuiltPayload: Into, @@ -353,7 +352,7 @@ where impl Future for PayloadBuilderService where - Engine: EngineTypes + 'static, + Engine: PayloadTypes + 'static, Gen: PayloadJobGenerator + Unpin + 'static, ::Job: Unpin + 'static, St: Stream + Send + Unpin + 'static, @@ -453,7 +452,7 @@ where } /// Message type for the [`PayloadBuilderService`]. -pub enum PayloadServiceCommand { +pub enum PayloadServiceCommand { /// Start building a new payload. 
BuildNewPayload( Engine::PayloadBuilderAttributes, @@ -477,7 +476,7 @@ pub enum PayloadServiceCommand { impl fmt::Debug for PayloadServiceCommand where - Engine: EngineTypes, + Engine: PayloadTypes, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 26a3fab3fc2c..62f697ddd6cb 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -5,7 +5,7 @@ use crate::{ EthPayloadBuilderAttributes, PayloadBuilderHandle, PayloadBuilderService, PayloadJob, PayloadJobGenerator, }; -use reth_engine_primitives::EngineTypes; +use reth_payload_primitives::PayloadTypes; use reth_primitives::{Block, U256}; use reth_provider::CanonStateNotification; use std::{ @@ -24,7 +24,7 @@ pub fn test_payload_service() -> ( PayloadBuilderHandle, ) where - Engine: EngineTypes< + Engine: PayloadTypes< PayloadBuilderAttributes = EthPayloadBuilderAttributes, BuiltPayload = EthBuiltPayload, > + 'static, @@ -35,7 +35,7 @@ where /// Creates a new [`PayloadBuilderService`] for testing purposes and spawns it in the background. 
pub fn spawn_test_payload_service() -> PayloadBuilderHandle where - Engine: EngineTypes< + Engine: PayloadTypes< PayloadBuilderAttributes = EthPayloadBuilderAttributes, BuiltPayload = EthBuiltPayload, > + 'static, diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index 1cb992da268f..ad63d46d8c72 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-errors.workspace = true reth-primitives.workspace = true reth-transaction-pool.workspace = true diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 89b014315425..085f8311d049 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod error; + pub use error::{EngineObjectValidationError, PayloadBuilderError, VersionSpecificValidationError}; /// Contains traits to abstract over payload attributes types and default implementations of the @@ -19,8 +20,20 @@ pub use traits::{BuiltPayload, PayloadAttributes, PayloadBuilderAttributes}; mod payload; pub use payload::PayloadOrAttributes; -use reth_primitives::ChainSpec; -use std::fmt::Debug; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +/// The types that are used by the engine API. +pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone { + /// The built payload type. + type BuiltPayload: BuiltPayload + Clone + Unpin; + + /// The RPC payload attributes type the CL node emits via the engine API. + type PayloadAttributes: PayloadAttributes + Unpin; + + /// The payload attributes type that contains information about a running payload job. 
+ type PayloadBuilderAttributes: PayloadBuilderAttributes + + Clone + + Unpin; +} /// Validates the timestamp depending on the version called: /// diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 7bb1ac0c83f4..259b14b3f6e3 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,6 +1,7 @@ +use reth_chainspec::ChainSpec; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Address, ChainSpec, Header, SealedBlock, Withdrawals, B256, U256, + Address, Header, SealedBlock, Withdrawals, B256, U256, }; use reth_rpc_types::{ engine::{OptimismPayloadAttributes, PayloadAttributes as EthPayloadAttributes, PayloadId}, diff --git a/crates/payload/validator/Cargo.toml b/crates/payload/validator/Cargo.toml index d138001a7061..66efe865ceba 100644 --- a/crates/payload/validator/Cargo.toml +++ b/crates/payload/validator/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index c4bd7e6af7cc..ddb43bdd617e 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -8,7 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use reth_primitives::{ChainSpec, SealedBlock}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_primitives::SealedBlock; use reth_rpc_types::{engine::MaybeCancunPayloadFields, ExecutionPayload, PayloadError}; use reth_rpc_types_compat::engine::payload::try_into_block; use std::sync::Arc; @@ -22,7 +23,7 @@ pub struct ExecutionPayloadValidator { impl ExecutionPayloadValidator { /// Create a new validator. 
- pub fn new(chain_spec: Arc) -> Self { + pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 50c5f3c24970..638cdccc6134 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -14,9 +14,20 @@ workspace = true [dependencies] reth-codecs.workspace = true +alloy-consensus = { workspace = true, features = ["serde"] } +alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true -alloy-consensus.workspace = true +alloy-rlp.workspace = true +alloy-rpc-types-eth = { workspace = true, optional = true } + +derive_more.workspace = true +revm-primitives = { workspace = true, features = ["serde"] } + +# misc +thiserror-no-std = { workspace = true, default-features = false } +roaring = "0.10.2" +byteorder = "1" # required by reth-codecs modular-bitfield.workspace = true @@ -26,18 +37,25 @@ serde.workspace = true # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } +proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +proptest-arbitrary-interop.workspace = true proptest-derive.workspace = true test-fuzz.workspace = true +rand.workspace = true +serde_json.workspace = true [features] +default = ["std"] +std = ["thiserror-no-std/std"] +test-utils = ["arbitrary"] arbitrary = [ + "alloy-consensus/arbitrary", "dep:arbitrary", "dep:proptest", - "dep:proptest-derive" + "dep:proptest-arbitrary-interop", ] - +alloy-compat = ["alloy-rpc-types-eth"] diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 3c3bb30335bf..21a8d199b3a9 100644 --- a/crates/primitives-traits/src/account.rs +++ 
b/crates/primitives-traits/src/account.rs @@ -1,7 +1,12 @@ use alloy_consensus::constants::KECCAK_EMPTY; use alloy_genesis::GenesisAccount; -use alloy_primitives::{keccak256, B256, U256}; +use alloy_primitives::{keccak256, Bytes, B256, U256}; +use byteorder::{BigEndian, ReadBytesExt}; +use bytes::Buf; +use derive_more::Deref; use reth_codecs::{main_codec, Compact}; +use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, JumpTable}; +use serde::{Deserialize, Serialize}; /// An Ethereum account. #[main_codec] @@ -45,3 +50,170 @@ impl Account { self.bytecode_hash.unwrap_or(KECCAK_EMPTY) } } + +/// Bytecode for an account. +/// +/// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support. +#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, Deref)] +pub struct Bytecode(pub RevmBytecode); + +impl Bytecode { + /// Create new bytecode from raw bytes. + /// + /// No analysis will be performed. + pub fn new_raw(bytes: Bytes) -> Self { + Self(RevmBytecode::new_raw(bytes)) + } +} + +impl Compact for Bytecode { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let bytecode = &self.0.bytecode()[..]; + buf.put_u32(bytecode.len() as u32); + buf.put_slice(bytecode); + let len = match &self.0 { + RevmBytecode::LegacyRaw(_) => { + buf.put_u8(0); + 1 + } + // `1` has been removed. + RevmBytecode::LegacyAnalyzed(analyzed) => { + buf.put_u8(2); + buf.put_u64(analyzed.original_len() as u64); + let map = analyzed.jump_table().as_slice(); + buf.put_slice(map); + 1 + 8 + map.len() + } + RevmBytecode::Eof(_) => { + // buf.put_u8(3); + // TODO(EOF) + todo!("EOF") + } + }; + len + bytecode.len() + 4 + } + + // # Panics + // + // A panic will be triggered if a bytecode variant of 1 or greater than 2 is passed from the + // database. 
+ fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { + let len = buf.read_u32::().expect("could not read bytecode length"); + let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); + let variant = buf.read_u8().expect("could not read bytecode variant"); + let decoded = match variant { + 0 => Self(RevmBytecode::new_raw(bytes)), + 1 => unreachable!("Junk data in database: checked Bytecode variant was removed"), + 2 => Self(unsafe { + RevmBytecode::new_analyzed( + bytes, + buf.read_u64::().unwrap() as usize, + JumpTable::from_slice(buf), + ) + }), + // TODO(EOF) + 3 => todo!("EOF"), + _ => unreachable!("Junk data in database: unknown Bytecode variant"), + }; + (decoded, &[]) + } +} + +impl From for Account { + fn from(revm_acc: AccountInfo) -> Self { + let code_hash = revm_acc.code_hash; + Self { + balance: revm_acc.balance, + nonce: revm_acc.nonce, + bytecode_hash: (code_hash != KECCAK_EMPTY).then_some(code_hash), + } + } +} + +impl From for AccountInfo { + fn from(reth_acc: Account) -> Self { + Self { + balance: reth_acc.balance, + nonce: reth_acc.nonce, + code_hash: reth_acc.bytecode_hash.unwrap_or(KECCAK_EMPTY), + code: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{hex_literal::hex, B256, U256}; + use revm_primitives::LegacyAnalyzedBytecode; + + #[test] + fn test_account() { + let mut buf = vec![]; + let mut acc = Account::default(); + let len = acc.to_compact(&mut buf); + assert_eq!(len, 2); + + acc.balance = U256::from(2); + let len = acc.to_compact(&mut buf); + assert_eq!(len, 3); + + acc.nonce = 2; + let len = acc.to_compact(&mut buf); + assert_eq!(len, 4); + } + + #[test] + fn test_empty_account() { + let mut acc = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None }; + // Nonce 0, balance 0, and bytecode hash set to None is considered empty. + assert!(acc.is_empty()); + + acc.bytecode_hash = Some(KECCAK_EMPTY); + // Nonce 0, balance 0, and bytecode hash set to KECCAK_EMPTY is considered empty. 
+ assert!(acc.is_empty()); + + acc.balance = U256::from(2); + // Non-zero balance makes it non-empty. + assert!(!acc.is_empty()); + + acc.balance = U256::ZERO; + acc.nonce = 10; + // Non-zero nonce makes it non-empty. + assert!(!acc.is_empty()); + + acc.nonce = 0; + acc.bytecode_hash = Some(B256::from(U256::ZERO)); + // Non-empty bytecode hash makes it non-empty. + assert!(!acc.is_empty()); + } + + #[test] + fn test_bytecode() { + let mut buf = vec![]; + let bytecode = Bytecode::new_raw(Bytes::default()); + let len = bytecode.to_compact(&mut buf); + assert_eq!(len, 5); + + let mut buf = vec![]; + let bytecode = Bytecode::new_raw(Bytes::from(&hex!("ffff"))); + let len = bytecode.to_compact(&mut buf); + assert_eq!(len, 7); + + let mut buf = vec![]; + let bytecode = Bytecode(RevmBytecode::LegacyAnalyzed(LegacyAnalyzedBytecode::new( + Bytes::from(&hex!("ffff")), + 2, + JumpTable::from_slice(&[0]), + ))); + let len = bytecode.clone().to_compact(&mut buf); + assert_eq!(len, 16); + + let (decoded, remainder) = Bytecode::from_compact(&buf, len); + assert_eq!(decoded, bytecode); + assert!(remainder.is_empty()); + } +} diff --git a/crates/primitives-traits/src/alloy_compat.rs b/crates/primitives-traits/src/alloy_compat.rs new file mode 100644 index 000000000000..4bf80e1f7c35 --- /dev/null +++ b/crates/primitives-traits/src/alloy_compat.rs @@ -0,0 +1,48 @@ +use super::Header; +use alloy_rpc_types_eth::{ConversionError, Header as RpcHeader}; + +impl TryFrom for Header { + type Error = ConversionError; + + fn try_from(header: RpcHeader) -> Result { + Ok(Self { + base_fee_per_gas: header + .base_fee_per_gas + .map(|base_fee_per_gas| { + base_fee_per_gas.try_into().map_err(ConversionError::BaseFeePerGasConversion) + }) + .transpose()?, + beneficiary: header.miner, + blob_gas_used: header + .blob_gas_used + .map(|blob_gas_used| { + blob_gas_used.try_into().map_err(ConversionError::BlobGasUsedConversion) + }) + .transpose()?, + difficulty: header.difficulty, + excess_blob_gas: 
header + .excess_blob_gas + .map(|excess_blob_gas| { + excess_blob_gas.try_into().map_err(ConversionError::ExcessBlobGasConversion) + }) + .transpose()?, + extra_data: header.extra_data, + gas_limit: header.gas_limit.try_into().map_err(ConversionError::GasLimitConversion)?, + gas_used: header.gas_used.try_into().map_err(ConversionError::GasUsedConversion)?, + logs_bloom: header.logs_bloom, + mix_hash: header.mix_hash.unwrap_or_default(), + nonce: u64::from_be_bytes(header.nonce.unwrap_or_default().0), + number: header.number.ok_or(ConversionError::MissingBlockNumber)?, + ommers_hash: header.uncles_hash, + parent_beacon_block_root: header.parent_beacon_block_root, + parent_hash: header.parent_hash, + receipts_root: header.receipts_root, + state_root: header.state_root, + timestamp: header.timestamp, + transactions_root: header.transactions_root, + withdrawals_root: header.withdrawals_root, + // TODO: requests_root: header.requests_root, + requests_root: None, + }) + } +} diff --git a/crates/primitives-traits/src/constants/gas_units.rs b/crates/primitives-traits/src/constants/gas_units.rs new file mode 100644 index 000000000000..0af0d2c24ce1 --- /dev/null +++ b/crates/primitives-traits/src/constants/gas_units.rs @@ -0,0 +1,80 @@ +use std::time::Duration; + +/// Represents one Kilogas, or `1_000` gas. +pub const KILOGAS: u64 = 1_000; + +/// Represents one Megagas, or `1_000_000` gas. +pub const MEGAGAS: u64 = KILOGAS * 1_000; + +/// Represents one Gigagas, or `1_000_000_000` gas. +pub const GIGAGAS: u64 = MEGAGAS * 1_000; + +/// Returns a formatted gas throughput log, showing either: +/// * "Kgas/s", or 1,000 gas per second +/// * "Mgas/s", or 1,000,000 gas per second +/// * "Ggas/s", or 1,000,000,000 gas per second +/// +/// Depending on the magnitude of the gas throughput. 
+pub fn format_gas_throughput(gas: u64, execution_duration: Duration) -> String { + let gas_per_second = gas as f64 / execution_duration.as_secs_f64(); + if gas_per_second < MEGAGAS as f64 { + format!("{:.2} Kgas/second", gas_per_second / KILOGAS as f64) + } else if gas_per_second < GIGAGAS as f64 { + format!("{:.2} Mgas/second", gas_per_second / MEGAGAS as f64) + } else { + format!("{:.2} Ggas/second", gas_per_second / GIGAGAS as f64) + } +} + +/// Returns a formatted gas log, showing either: +/// * "Kgas", or 1,000 gas +/// * "Mgas", or 1,000,000 gas +/// * "Ggas", or 1,000,000,000 gas +/// +/// Depending on the magnitude of gas. +pub fn format_gas(gas: u64) -> String { + let gas = gas as f64; + if gas < MEGAGAS as f64 { + format!("{:.2} Kgas", gas / KILOGAS as f64) + } else if gas < GIGAGAS as f64 { + format!("{:.2} Mgas", gas / MEGAGAS as f64) + } else { + format!("{:.2} Ggas", gas / GIGAGAS as f64) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_gas_fmt() { + let gas = 100_000; + let gas_unit = format_gas(gas); + assert_eq!(gas_unit, "100.00 Kgas"); + + let gas = 100_000_000; + let gas_unit = format_gas(gas); + assert_eq!(gas_unit, "100.00 Mgas"); + + let gas = 100_000_000_000; + let gas_unit = format_gas(gas); + assert_eq!(gas_unit, "100.00 Ggas"); + } + + #[test] + fn test_gas_throughput_fmt() { + let duration = Duration::from_secs(1); + let gas = 100_000; + let throughput = format_gas_throughput(gas, duration); + assert_eq!(throughput, "100.00 Kgas/second"); + + let gas = 100_000_000; + let throughput = format_gas_throughput(gas, duration); + assert_eq!(throughput, "100.00 Mgas/second"); + + let gas = 100_000_000_000; + let throughput = format_gas_throughput(gas, duration); + assert_eq!(throughput, "100.00 Ggas/second"); + } +} diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs new file mode 100644 index 000000000000..34e286a3e1dd --- /dev/null +++ 
b/crates/primitives-traits/src/constants/mod.rs @@ -0,0 +1,170 @@ +//! Ethereum protocol-related constants + +use alloy_primitives::{address, b256, Address, B256, U256}; +use core::time::Duration; + +/// Gas units, for example [`GIGAGAS`]. +pub mod gas_units; +pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; + +/// The client version: `reth/v{major}.{minor}.{patch}` +pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); + +/// The first four bytes of the call data for a function call specifies the function to be called. +pub const SELECTOR_LEN: usize = 4; + +/// Maximum extra data size in a block after genesis +pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 32; + +/// An EPOCH is a series of 32 slots. +pub const EPOCH_SLOTS: u64 = 32; + +/// The duration of a slot in seconds. +/// +/// This is the time period of 12 seconds in which a randomly chosen validator has time to propose a +/// block. +pub const SLOT_DURATION: Duration = Duration::from_secs(12); + +/// An EPOCH is a series of 32 slots (~6.4min). +pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS); + +/// The default block nonce in the beacon consensus +pub const BEACON_NONCE: u64 = 0u64; + +/// The default Ethereum block gas limit. +// TODO: This should be a chain spec parameter. +/// See . +pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000; + +/// The minimum tx fee below which the txpool will reject the transaction. +/// +/// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559 +/// parameters. `BASE_FEE_MAX_CHANGE_DENOMINATOR` +/// is `8`, or 12.5%. Once the base fee has dropped to `7` WEI it cannot decrease further because +/// 12.5% of 7 is less than 1. +/// +/// Note that min base fee under different 1559 parameterizations may differ, but there's no +/// significant harm in leaving this setting as is. +pub const MIN_PROTOCOL_BASE_FEE: u64 = 7; + +/// Same as [`MIN_PROTOCOL_BASE_FEE`] but as a U256. 
+pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]); + +/// Initial base fee as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) +pub const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000; + +/// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) +pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8; + +/// Elasticity multiplier as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) +pub const EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 2; + +/// Minimum gas limit allowed for transactions. +pub const MINIMUM_GAS_LIMIT: u64 = 5000; + +/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism +/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. +pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; + +/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism Canyon +/// hardfork. +pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; + +/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism +/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. +pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; + +/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism +/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. +pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; + +/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism Canyon +/// hardfork. 
+pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; + +/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism +/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. +pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; + +/// Base fee max change denominator for Base Sepolia as defined in the Optimism +/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. +pub const BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10; + +/// Multiplier for converting gwei to wei. +pub const GWEI_TO_WEI: u64 = 1_000_000_000; + +/// Multiplier for converting finney (milliether) to wei. +pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000; + +/// Multiplier for converting ether to wei. +pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000; + +/// The Ethereum mainnet genesis hash: +/// `0x0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3` +pub const MAINNET_GENESIS_HASH: B256 = + b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + +/// Sepolia genesis hash: `0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9` +pub const SEPOLIA_GENESIS_HASH: B256 = + b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"); + +/// Holesky genesis hash: `0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4` +pub const HOLESKY_GENESIS_HASH: B256 = + b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); + +/// Testnet genesis hash: `0x2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c` +pub const DEV_GENESIS_HASH: B256 = + b256!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c"); + +/// Keccak256 over empty array: `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` +pub const KECCAK_EMPTY: B256 = + 
b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); + +/// Ommer root of empty list: `0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347` +pub const EMPTY_OMMER_ROOT_HASH: B256 = + b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"); + +/// Root hash of an empty trie: `0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421` +pub const EMPTY_ROOT_HASH: B256 = + b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"); + +/// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` +pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); + +/// Transactions root of empty receipts set. +pub const EMPTY_RECEIPTS: B256 = EMPTY_ROOT_HASH; + +/// Transactions root of empty transactions set. +pub const EMPTY_TRANSACTIONS: B256 = EMPTY_ROOT_HASH; + +/// Withdrawals root of empty withdrawals set. +pub const EMPTY_WITHDRAWALS: B256 = EMPTY_ROOT_HASH; + +/// The number of blocks to unwind during a reorg that already became a part of canonical chain. +/// +/// In reality, the node can end up in this particular situation very rarely. It would happen only +/// if the node process is abruptly terminated during ongoing reorg and doesn't boot back up for +/// long period of time. +/// +/// Unwind depth of `3` blocks significantly reduces the chance that the reorged block is kept in +/// the database. +pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3; + +/// Max seconds from current time allowed for blocks, before they're considered future blocks. +/// +/// This is only used when checking whether or not the timestamp for pre-merge blocks is in the +/// future. 
+/// +/// See: +/// +pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn min_protocol_sanity() { + assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::(), MIN_PROTOCOL_BASE_FEE); + } +} diff --git a/crates/primitives/src/error.rs b/crates/primitives-traits/src/error.rs similarity index 95% rename from crates/primitives/src/error.rs rename to crates/primitives-traits/src/error.rs index 42257cc7ba65..8ae946c24208 100644 --- a/crates/primitives/src/error.rs +++ b/crates/primitives-traits/src/error.rs @@ -1,8 +1,11 @@ -use std::{ +use core::{ fmt, ops::{Deref, DerefMut}, }; +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; + /// A pair of values, one of which is expected and one of which is actual. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct GotExpected { @@ -18,6 +21,7 @@ impl fmt::Display for GotExpected { } } +#[cfg(feature = "std")] impl std::error::Error for GotExpected {} impl From<(T, T)> for GotExpected { @@ -55,6 +59,7 @@ impl fmt::Display for GotExpectedBoxed { } } +#[cfg(feature = "std")] impl std::error::Error for GotExpectedBoxed {} impl Deref for GotExpectedBoxed { diff --git a/crates/primitives-traits/src/header/error.rs b/crates/primitives-traits/src/header/error.rs new file mode 100644 index 000000000000..6161afc1a5cb --- /dev/null +++ b/crates/primitives-traits/src/header/error.rs @@ -0,0 +1,8 @@ +/// Errors that can occur during header sanity checks. +#[derive(Debug, PartialEq, Eq)] +pub enum HeaderError { + /// Represents an error when the block difficulty is too large. + LargeDifficulty, + /// Represents an error when the block extradata is too large. 
+ LargeExtraData, +} diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs new file mode 100644 index 000000000000..21a596ce7bd2 --- /dev/null +++ b/crates/primitives-traits/src/header/mod.rs @@ -0,0 +1,529 @@ +mod sealed; +pub use sealed::SealedHeader; + +mod error; +pub use error::HeaderError; + +#[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] +pub mod test_utils; + +use alloy_consensus::constants::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; +use alloy_eips::{ + calc_next_block_base_fee, eip1559::BaseFeeParams, merge::ALLOWED_FUTURE_BLOCK_TIME_SECONDS, + BlockNumHash, +}; +use alloy_primitives::{keccak256, Address, BlockNumber, Bloom, Bytes, B256, B64, U256}; +use alloy_rlp::{length_of_length, Decodable, Encodable}; +use bytes::BufMut; +use core::mem; +use reth_codecs::{add_arbitrary_tests, main_codec, Compact}; +use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas}; + +/// Block header +#[main_codec(no_arbitrary)] +#[add_arbitrary_tests(rlp, 25)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Header { + /// The Keccak 256-bit hash of the parent + /// block’s header, in its entirety; formally Hp. + pub parent_hash: B256, + /// The Keccak 256-bit hash of the ommers list portion of this block; formally Ho. + pub ommers_hash: B256, + /// The 160-bit address to which all fees collected from the successful mining of this block + /// be transferred; formally Hc. + pub beneficiary: Address, + /// The Keccak 256-bit hash of the root node of the state trie, after all transactions are + /// executed and finalisations applied; formally Hr. + pub state_root: B256, + /// The Keccak 256-bit hash of the root node of the trie structure populated with each + /// transaction in the transactions list portion of the block; formally Ht. 
+ pub transactions_root: B256, + /// The Keccak 256-bit hash of the root node of the trie structure populated with the receipts + /// of each transaction in the transactions list portion of the block; formally He. + pub receipts_root: B256, + /// The Keccak 256-bit hash of the withdrawals list portion of this block. + /// + /// See [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895). + pub withdrawals_root: Option, + /// The Bloom filter composed from indexable information (logger address and log topics) + /// contained in each log entry from the receipt of each transaction in the transactions list; + /// formally Hb. + pub logs_bloom: Bloom, + /// A scalar value corresponding to the difficulty level of this block. This can be calculated + /// from the previous block’s difficulty level and the timestamp; formally Hd. + pub difficulty: U256, + /// A scalar value equal to the number of ancestor blocks. The genesis block has a number of + /// zero; formally Hi. + pub number: BlockNumber, + /// A scalar value equal to the current limit of gas expenditure per block; formally Hl. + pub gas_limit: u64, + /// A scalar value equal to the total gas used in transactions in this block; formally Hg. + pub gas_used: u64, + /// A scalar value equal to the reasonable output of Unix’s time() at this block’s inception; + /// formally Hs. + pub timestamp: u64, + /// A 256-bit hash which, combined with the + /// nonce, proves that a sufficient amount of computation has been carried out on this block; + /// formally Hm. + pub mix_hash: B256, + /// A 64-bit value which, combined with the mixhash, proves that a sufficient amount of + /// computation has been carried out on this block; formally Hn. + pub nonce: u64, + /// A scalar representing EIP1559 base fee which can move up or down each block according + /// to a formula which is a function of gas used in parent block and gas target + /// (block gas limit divided by elasticity multiplier) of parent block. 
+ /// The algorithm results in the base fee per gas increasing when blocks are + /// above the gas target, and decreasing when blocks are below the gas target. The base fee per + /// gas is burned. + pub base_fee_per_gas: Option, + /// The total amount of blob gas consumed by the transactions within the block, added in + /// EIP-4844. + pub blob_gas_used: Option, + /// A running total of blob gas consumed in excess of the target, prior to the block. Blocks + /// with above-target blob gas consumption increase this value, blocks with below-target blob + /// gas consumption decrease it (bounded at 0). This was added in EIP-4844. + pub excess_blob_gas: Option, + /// The hash of the parent beacon block's root is included in execution blocks, as proposed by + /// EIP-4788. + /// + /// This enables trust-minimized access to consensus state, supporting staking pools, bridges, + /// and more. + /// + /// The beacon roots contract handles root storage, enhancing Ethereum's functionalities. + pub parent_beacon_block_root: Option, + /// The Keccak 256-bit hash of the root node of the trie structure populated with each + /// [EIP-7685] request in the block body. + /// + /// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685 + pub requests_root: Option, + /// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or + /// fewer; formally Hx. 
+ pub extra_data: Bytes, +} + +impl AsRef for Header { + fn as_ref(&self) -> &Self { + self + } +} + +impl Default for Header { + fn default() -> Self { + Self { + parent_hash: Default::default(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: Default::default(), + state_root: EMPTY_ROOT_HASH, + transactions_root: EMPTY_ROOT_HASH, + receipts_root: EMPTY_ROOT_HASH, + logs_bloom: Default::default(), + difficulty: Default::default(), + number: 0, + gas_limit: 0, + gas_used: 0, + timestamp: 0, + extra_data: Default::default(), + mix_hash: Default::default(), + nonce: 0, + base_fee_per_gas: None, + withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_root: None, + } + } +} + +impl Header { + /// Checks if the block's difficulty is set to zero, indicating a Proof-of-Stake header. + /// + /// This function is linked to EIP-3675, proposing the consensus upgrade to Proof-of-Stake: + /// [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#replacing-difficulty-with-0) + /// + /// Verifies whether, as per the EIP, the block's difficulty is updated to zero, + /// signifying the transition to a Proof-of-Stake mechanism. + /// + /// Returns `true` if the block's difficulty matches the constant zero set by the EIP. + pub fn is_zero_difficulty(&self) -> bool { + self.difficulty.is_zero() + } + + /// Performs a sanity check on the extradata field of the header. + /// + /// # Errors + /// + /// Returns an error if the extradata size is larger than 100 KB. + pub fn ensure_extradata_valid(&self) -> Result<(), HeaderError> { + if self.extra_data.len() > 100 * 1024 { + return Err(HeaderError::LargeExtraData) + } + Ok(()) + } + + /// Performs a sanity check on the block difficulty field of the header. + /// + /// # Errors + /// + /// Returns an error if the block difficulty exceeds 80 bits. 
+ pub fn ensure_difficulty_valid(&self) -> Result<(), HeaderError> { + if self.difficulty.bit_len() > 80 { + return Err(HeaderError::LargeDifficulty) + } + Ok(()) + } + + /// Performs combined sanity checks on multiple header fields. + /// + /// This method combines checks for block difficulty and extradata sizes. + /// + /// # Errors + /// + /// Returns an error if either the block difficulty exceeds 80 bits + /// or if the extradata size is larger than 100 KB. + pub fn ensure_well_formed(&self) -> Result<(), HeaderError> { + self.ensure_difficulty_valid()?; + self.ensure_extradata_valid()?; + Ok(()) + } + + /// Checks if the block's timestamp is in the past compared to the parent block's timestamp. + /// + /// Note: This check is relevant only pre-merge. + pub const fn is_timestamp_in_past(&self, parent_timestamp: u64) -> bool { + self.timestamp <= parent_timestamp + } + + /// Checks if the block's timestamp is in the future based on the present timestamp. + /// + /// Clock can drift but this can be consensus issue. + /// + /// Note: This check is relevant only pre-merge. + pub const fn exceeds_allowed_future_timestamp(&self, present_timestamp: u64) -> bool { + self.timestamp > present_timestamp + ALLOWED_FUTURE_BLOCK_TIME_SECONDS + } + + /// Returns the parent block's number and hash + pub const fn parent_num_hash(&self) -> BlockNumHash { + BlockNumHash { number: self.number.saturating_sub(1), hash: self.parent_hash } + } + + /// Heavy function that will calculate hash of data and will *not* save the change to metadata. + /// Use [`Header::seal`], [`SealedHeader`] and unlock if you need hash to be persistent. 
+ pub fn hash_slow(&self) -> B256 { + keccak256(alloy_rlp::encode(self)) + } + + /// Checks if the header is empty - has no transactions and no ommers + pub fn is_empty(&self) -> bool { + self.transaction_root_is_empty() && + self.ommers_hash_is_empty() && + self.withdrawals_root.map_or(true, |root| root == EMPTY_ROOT_HASH) + } + + /// Check if the ommers hash equals to empty hash list. + pub fn ommers_hash_is_empty(&self) -> bool { + self.ommers_hash == EMPTY_OMMER_ROOT_HASH + } + + /// Check if the transaction root equals to empty root. + pub fn transaction_root_is_empty(&self) -> bool { + self.transactions_root == EMPTY_ROOT_HASH + } + + /// Returns the blob fee for _this_ block according to the EIP-4844 spec. + /// + /// Returns `None` if `excess_blob_gas` is None + pub fn blob_fee(&self) -> Option { + self.excess_blob_gas.map(calc_blob_gasprice) + } + + /// Returns the blob fee for the next block according to the EIP-4844 spec. + /// + /// Returns `None` if `excess_blob_gas` is None. + /// + /// See also [`Self::next_block_excess_blob_gas`] + pub fn next_block_blob_fee(&self) -> Option { + self.next_block_excess_blob_gas().map(calc_blob_gasprice) + } + + /// Calculate base fee for next block according to the EIP-1559 spec. + /// + /// Returns a `None` if no base fee is set, no EIP-1559 support + pub fn next_block_base_fee(&self, base_fee_params: BaseFeeParams) -> Option { + Some(calc_next_block_base_fee( + self.gas_used as u128, + self.gas_limit as u128, + self.base_fee_per_gas? as u128, + base_fee_params, + ) as u64) + } + + /// Calculate excess blob gas for the next block according to the EIP-4844 spec. + /// + /// Returns a `None` if no excess blob gas is set, no EIP-4844 support + pub fn next_block_excess_blob_gas(&self) -> Option { + Some(calc_excess_blob_gas(self.excess_blob_gas?, self.blob_gas_used?)) + } + + /// Seal the header with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. 
+ #[inline] + pub const fn seal(self, hash: B256) -> SealedHeader { + SealedHeader::new(self, hash) + } + + /// Calculate hash and seal the Header so that it can't be changed. + #[inline] + pub fn seal_slow(self) -> SealedHeader { + let hash = self.hash_slow(); + self.seal(hash) + } + + /// Calculate a heuristic for the in-memory size of the [Header]. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + // parent hash + mem::size_of::() + // ommers hash + mem::size_of::

() + // beneficiary + mem::size_of::() + // state root + mem::size_of::() + // transactions root + mem::size_of::() + // receipts root + mem::size_of::>() + // withdrawals root + mem::size_of::() + // logs bloom + mem::size_of::() + // difficulty + mem::size_of::() + // number + mem::size_of::() + // gas limit + mem::size_of::() + // gas used + mem::size_of::() + // timestamp + mem::size_of::() + // mix hash + mem::size_of::() + // nonce + mem::size_of::>() + // base fee per gas + mem::size_of::>() + // blob gas used + mem::size_of::>() + // excess blob gas + mem::size_of::>() + // parent beacon block root + self.extra_data.len() // extra data + } + + fn header_payload_length(&self) -> usize { + let mut length = 0; + length += self.parent_hash.length(); // Hash of the previous block. + length += self.ommers_hash.length(); // Hash of uncle blocks. + length += self.beneficiary.length(); // Address that receives rewards. + length += self.state_root.length(); // Root hash of the state object. + length += self.transactions_root.length(); // Root hash of transactions in the block. + length += self.receipts_root.length(); // Hash of transaction receipts. + length += self.logs_bloom.length(); // Data structure containing event logs. + length += self.difficulty.length(); // Difficulty value of the block. + length += U256::from(self.number).length(); // Block number. + length += U256::from(self.gas_limit).length(); // Maximum gas allowed. + length += U256::from(self.gas_used).length(); // Actual gas used. + length += self.timestamp.length(); // Block timestamp. + length += self.extra_data.length(); // Additional arbitrary data. + length += self.mix_hash.length(); // Hash used for mining. + length += B64::new(self.nonce.to_be_bytes()).length(); // Nonce for mining. + + if let Some(base_fee) = self.base_fee_per_gas { + // Adding base fee length if it exists. 
+ length += U256::from(base_fee).length(); + } + + if let Some(root) = self.withdrawals_root { + // Adding withdrawals_root length if it exists. + length += root.length(); + } + + if let Some(blob_gas_used) = self.blob_gas_used { + // Adding blob_gas_used length if it exists. + length += U256::from(blob_gas_used).length(); + } + + if let Some(excess_blob_gas) = self.excess_blob_gas { + // Adding excess_blob_gas length if it exists. + length += U256::from(excess_blob_gas).length(); + } + + if let Some(parent_beacon_block_root) = self.parent_beacon_block_root { + length += parent_beacon_block_root.length(); + } + + if let Some(requests_root) = self.requests_root { + length += requests_root.length(); + } + + length + } +} + +impl Encodable for Header { + fn encode(&self, out: &mut dyn BufMut) { + // Create a header indicating the encoded content is a list with the payload length computed + // from the header's payload calculation function. + let list_header = + alloy_rlp::Header { list: true, payload_length: self.header_payload_length() }; + list_header.encode(out); + + // Encode each header field sequentially + self.parent_hash.encode(out); // Encode parent hash. + self.ommers_hash.encode(out); // Encode ommer's hash. + self.beneficiary.encode(out); // Encode beneficiary. + self.state_root.encode(out); // Encode state root. + self.transactions_root.encode(out); // Encode transactions root. + self.receipts_root.encode(out); // Encode receipts root. + self.logs_bloom.encode(out); // Encode logs bloom. + self.difficulty.encode(out); // Encode difficulty. + U256::from(self.number).encode(out); // Encode block number. + U256::from(self.gas_limit).encode(out); // Encode gas limit. + U256::from(self.gas_used).encode(out); // Encode gas used. + self.timestamp.encode(out); // Encode timestamp. + self.extra_data.encode(out); // Encode extra data. + self.mix_hash.encode(out); // Encode mix hash. + B64::new(self.nonce.to_be_bytes()).encode(out); // Encode nonce. 
+ + // Encode base fee. + if let Some(ref base_fee) = self.base_fee_per_gas { + U256::from(*base_fee).encode(out); + } + + // Encode withdrawals root. + if let Some(ref root) = self.withdrawals_root { + root.encode(out); + } + + // Encode blob gas used. + if let Some(ref blob_gas_used) = self.blob_gas_used { + U256::from(*blob_gas_used).encode(out); + } + + // Encode excess blob gas. + if let Some(ref excess_blob_gas) = self.excess_blob_gas { + U256::from(*excess_blob_gas).encode(out); + } + + // Encode parent beacon block root. + if let Some(ref parent_beacon_block_root) = self.parent_beacon_block_root { + parent_beacon_block_root.encode(out); + } + + // Encode EIP-7685 requests root + if let Some(ref requests_root) = self.requests_root { + requests_root.encode(out); + } + } + + fn length(&self) -> usize { + let mut length = 0; + length += self.header_payload_length(); + length += length_of_length(length); + length + } +} + +impl Decodable for Header { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let rlp_head = alloy_rlp::Header::decode(buf)?; + if !rlp_head.list { + return Err(alloy_rlp::Error::UnexpectedString) + } + let started_len = buf.len(); + let mut this = Self { + parent_hash: Decodable::decode(buf)?, + ommers_hash: Decodable::decode(buf)?, + beneficiary: Decodable::decode(buf)?, + state_root: Decodable::decode(buf)?, + transactions_root: Decodable::decode(buf)?, + receipts_root: Decodable::decode(buf)?, + logs_bloom: Decodable::decode(buf)?, + difficulty: Decodable::decode(buf)?, + number: u64::decode(buf)?, + gas_limit: u64::decode(buf)?, + gas_used: u64::decode(buf)?, + timestamp: Decodable::decode(buf)?, + extra_data: Decodable::decode(buf)?, + mix_hash: Decodable::decode(buf)?, + nonce: u64::from_be_bytes(B64::decode(buf)?.0), + base_fee_per_gas: None, + withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_root: None, + }; + if started_len - buf.len() < rlp_head.payload_length { 
+ this.base_fee_per_gas = Some(u64::decode(buf)?); + } + + // Withdrawals root for post-shanghai headers + if started_len - buf.len() < rlp_head.payload_length { + this.withdrawals_root = Some(Decodable::decode(buf)?); + } + + // Blob gas used and excess blob gas for post-cancun headers + if started_len - buf.len() < rlp_head.payload_length { + this.blob_gas_used = Some(u64::decode(buf)?); + } + + if started_len - buf.len() < rlp_head.payload_length { + this.excess_blob_gas = Some(u64::decode(buf)?); + } + + // Decode parent beacon block root. + if started_len - buf.len() < rlp_head.payload_length { + this.parent_beacon_block_root = Some(B256::decode(buf)?); + } + + // Decode requests root. + if started_len - buf.len() < rlp_head.payload_length { + this.requests_root = Some(B256::decode(buf)?); + } + + let consumed = started_len - buf.len(); + if consumed != rlp_head.payload_length { + return Err(alloy_rlp::Error::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + Ok(this) + } +} + +#[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for Header { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + // Generate an arbitrary header, passing it to the generate_valid_header function to make + // sure it is valid _with respect to hardforks only_. 
+ let base = Self { + parent_hash: u.arbitrary()?, + ommers_hash: u.arbitrary()?, + beneficiary: u.arbitrary()?, + state_root: u.arbitrary()?, + transactions_root: u.arbitrary()?, + receipts_root: u.arbitrary()?, + logs_bloom: u.arbitrary()?, + difficulty: u.arbitrary()?, + number: u.arbitrary()?, + gas_limit: u.arbitrary()?, + gas_used: u.arbitrary()?, + timestamp: u.arbitrary()?, + extra_data: u.arbitrary()?, + mix_hash: u.arbitrary()?, + nonce: u.arbitrary()?, + base_fee_per_gas: u.arbitrary()?, + blob_gas_used: u.arbitrary()?, + excess_blob_gas: u.arbitrary()?, + parent_beacon_block_root: u.arbitrary()?, + requests_root: u.arbitrary()?, + withdrawals_root: u.arbitrary()?, + }; + + Ok(test_utils::generate_valid_header( + base, + u.arbitrary()?, + u.arbitrary()?, + u.arbitrary()?, + u.arbitrary()?, + )) + } +} diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs new file mode 100644 index 000000000000..355e4a9bc62a --- /dev/null +++ b/crates/primitives-traits/src/header/sealed.rs @@ -0,0 +1,136 @@ +use super::Header; +use alloy_eips::BlockNumHash; +use alloy_primitives::{keccak256, BlockHash}; +#[cfg(any(test, feature = "test-utils"))] +use alloy_primitives::{BlockNumber, B256, U256}; +use alloy_rlp::{Decodable, Encodable}; +use bytes::BufMut; +use core::mem; +use derive_more::{AsRef, Deref}; +use reth_codecs::{add_arbitrary_tests, main_codec, Compact}; + +/// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want +/// to modify header. +#[main_codec(no_arbitrary)] +#[add_arbitrary_tests(rlp, compact)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)] +pub struct SealedHeader { + /// Locked Header hash. + hash: BlockHash, + /// Locked Header fields. + #[as_ref] + #[deref] + header: Header, +} + +impl SealedHeader { + /// Creates the sealed header with the corresponding block hash. 
+ #[inline] + pub const fn new(header: Header, hash: BlockHash) -> Self { + Self { header, hash } + } + + /// Returns the sealed Header fields. + #[inline] + pub const fn header(&self) -> &Header { + &self.header + } + + /// Returns header/block hash. + #[inline] + pub const fn hash(&self) -> BlockHash { + self.hash + } + + /// Extract raw header that can be modified. + pub fn unseal(self) -> Header { + self.header + } + + /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. + pub fn split(self) -> (Header, BlockHash) { + (self.header, self.hash) + } + + /// Return the number hash tuple. + pub fn num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.number, self.hash) + } + + /// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. + #[inline] + pub fn size(&self) -> usize { + self.header.size() + mem::size_of::() + } +} + +impl Default for SealedHeader { + fn default() -> Self { + Header::default().seal_slow() + } +} + +impl Encodable for SealedHeader { + fn encode(&self, out: &mut dyn BufMut) { + self.header.encode(out); + } +} + +impl Decodable for SealedHeader { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let started_len = buf.len(); + + // decode the header from temp buffer + let header = Header::decode(b)?; + + // hash the consumed bytes, the rlp encoded header + let consumed = started_len - b.len(); + let hash = keccak256(&buf[..consumed]); + + // update original buffer + *buf = *b; + + Ok(Self { header, hash }) + } +} + +#[cfg(any(test, feature = "test-utils"))] +impl SealedHeader { + /// Updates the block header. + pub fn set_header(&mut self, header: Header) { + self.header = header + } + + /// Updates the block hash. + pub fn set_hash(&mut self, hash: BlockHash) { + self.hash = hash + } + + /// Updates the parent block hash. + pub fn set_parent_hash(&mut self, hash: BlockHash) { + self.header.parent_hash = hash + } + + /// Updates the block number. 
+ pub fn set_block_number(&mut self, number: BlockNumber) { + self.header.number = number; + } + + /// Updates the block state root. + pub fn set_state_root(&mut self, state_root: B256) { + self.header.state_root = state_root; + } + + /// Updates the block difficulty. + pub fn set_difficulty(&mut self, difficulty: U256) { + self.header.difficulty = difficulty; + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + Ok(Header::arbitrary(u)?.seal_slow()) + } +} diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs new file mode 100644 index 000000000000..982b779d4b5b --- /dev/null +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -0,0 +1,67 @@ +//! Test utilities to generate random valid headers. + +use crate::Header; +use alloy_primitives::B256; +use proptest::{arbitrary::any, prop_compose}; +use proptest_arbitrary_interop::arb; + +/// Generates a header which is valid __with respect to past and future forks__. This means, for +/// example, that if the withdrawals root is present, the base fee per gas is also present. +/// +/// If blob gas used were present, then the excess blob gas and parent beacon block root are also +/// present. In this example, the withdrawals root would also be present. +/// +/// This __does not, and should not guarantee__ that the header is valid with respect to __anything +/// else__. 
+pub const fn generate_valid_header( + mut header: Header, + eip_4844_active: bool, + blob_gas_used: u64, + excess_blob_gas: u64, + parent_beacon_block_root: B256, +) -> Header { + // EIP-1559 logic + if header.base_fee_per_gas.is_none() { + // If EIP-1559 is not active, clear related fields + header.withdrawals_root = None; + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } else if header.withdrawals_root.is_none() { + // If EIP-4895 is not active, clear related fields + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } else if eip_4844_active { + // Set fields based on EIP-4844 being active + header.blob_gas_used = Some(blob_gas_used); + header.excess_blob_gas = Some(excess_blob_gas); + header.parent_beacon_block_root = Some(parent_beacon_block_root); + } else { + // If EIP-4844 is not active, clear related fields + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } + + // todo(onbjerg): adjust this for eip-7589 + header.requests_root = None; + + header +} + +prop_compose! { + /// Generates a proptest strategy for constructing an instance of a header which is valid __with + /// respect to past and future forks__. + /// + /// See docs for [generate_valid_header] for more information. + pub fn valid_header_strategy()( + header in arb::
(), + eip_4844_active in any::(), + blob_gas_used in any::(), + excess_blob_gas in any::(), + parent_beacon_block_root in arb::() + ) -> Header { + generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root) + } +} diff --git a/crates/primitives/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs similarity index 94% rename from crates/primitives/src/integer_list.rs rename to crates/primitives-traits/src/integer_list.rs index 4dcb9a9bc9ae..8e258fd8b060 100644 --- a/crates/primitives/src/integer_list.rs +++ b/crates/primitives-traits/src/integer_list.rs @@ -1,25 +1,21 @@ use bytes::BufMut; +use core::fmt; +use derive_more::Deref; use roaring::RoaringTreemap; use serde::{ de::{SeqAccess, Unexpected, Visitor}, ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer, }; -use std::{fmt, ops::Deref}; + +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; /// Uses Roaring Bitmaps to hold a list of integers. It provides really good compression with the /// capability to access its elements without decoding it. -#[derive(Clone, PartialEq, Default)] +#[derive(Clone, PartialEq, Default, Deref)] pub struct IntegerList(pub RoaringTreemap); -impl Deref for IntegerList { - type Target = RoaringTreemap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - impl fmt::Debug for IntegerList { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let vec: Vec = self.0.iter().collect(); @@ -105,7 +101,7 @@ struct IntegerListVisitor; impl<'de> Visitor<'de> for IntegerListVisitor { type Value = IntegerList; - fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("a usize array") } @@ -144,7 +140,7 @@ impl<'a> Arbitrary<'a> for IntegerList { } /// Primitives error type. -#[derive(Debug, thiserror::Error)] +#[derive(Debug, thiserror_no_std::Error)] pub enum RoaringBitmapError { /// The provided input is invalid. 
#[error("the provided input is invalid")] diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index af8918de1977..b7a42d9c7b5c 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -6,10 +6,40 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "alloy-compat")] +mod alloy_compat; + +/// Common constants. +pub mod constants; +pub use constants::gas_units::{format_gas, format_gas_throughput}; /// Minimal account pub mod account; -pub use account::Account; +pub use account::{Account, Bytecode}; + +mod integer_list; +pub use integer_list::IntegerList; + +pub mod request; +pub use request::{Request, Requests}; + +mod withdrawal; +pub use withdrawal::{Withdrawal, Withdrawals}; + +mod error; +pub use error::{GotExpected, GotExpectedBoxed}; + +mod log; +pub use log::{logs_bloom, Log, LogData}; + +mod storage; +pub use storage::StorageEntry; + +/// Common header types +pub mod header; +#[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] +pub use header::test_utils; +pub use header::{Header, HeaderError, SealedHeader}; diff --git a/crates/primitives/src/log.rs b/crates/primitives-traits/src/log.rs similarity index 89% rename from crates/primitives/src/log.rs rename to crates/primitives-traits/src/log.rs index b2b6b8a4852c..aa6bc26a97a9 100644 --- a/crates/primitives/src/log.rs +++ b/crates/primitives-traits/src/log.rs @@ -1,6 +1,5 @@ -use crate::Bloom; - -pub use alloy_primitives::Log; +use alloy_primitives::Bloom; +pub use alloy_primitives::{Log, LogData}; /// Calculate receipt logs bloom. 
pub fn logs_bloom<'a>(logs: impl IntoIterator) -> Bloom { @@ -19,6 +18,7 @@ mod tests { use alloy_primitives::{Address, Bytes, Log as AlloyLog, B256}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; + use proptest_arbitrary_interop::arb; use reth_codecs::{main_codec, Compact}; /// This type is kept for compatibility tests after the codec support was added to @@ -29,12 +29,6 @@ mod tests { /// Contract that emitted this log. address: Address, /// Topics of the log. The number of logs depend on what `LOG` opcode is used. - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=5)" - ) - )] topics: Vec, /// Arbitrary length data. data: Bytes, @@ -58,7 +52,7 @@ mod tests { proptest! { #[test] - fn test_roundtrip_conversion_between_log_and_alloy_log(log: Log) { + fn test_roundtrip_conversion_between_log_and_alloy_log(log in arb::()) { // Convert log to buffer and then create alloy_log from buffer and compare let mut compacted_log = Vec::::new(); let len = log.clone().to_compact(&mut compacted_log); diff --git a/crates/primitives/src/request.rs b/crates/primitives-traits/src/request.rs similarity index 74% rename from crates/primitives/src/request.rs rename to crates/primitives-traits/src/request.rs index e2ccc9701826..99c2375e2606 100644 --- a/crates/primitives/src/request.rs +++ b/crates/primitives-traits/src/request.rs @@ -1,31 +1,20 @@ //! EIP-7685 requests. -use crate::Request; +pub use alloy_consensus::Request; use alloy_eips::eip7685::{Decodable7685, Encodable7685}; use alloy_rlp::{Decodable, Encodable}; +use derive_more::{Deref, DerefMut, From, IntoIterator}; use reth_codecs::{main_codec, Compact}; use revm_primitives::Bytes; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + /// A list of EIP-7685 requests. 
#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Default, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Hash, Deref, DerefMut, From, IntoIterator)] pub struct Requests(pub Vec); -impl From> for Requests { - fn from(requests: Vec) -> Self { - Self(requests) - } -} - -impl IntoIterator for Requests { - type Item = Request; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} - impl Encodable for Requests { fn encode(&self, out: &mut dyn bytes::BufMut) { let mut h = alloy_rlp::Header { list: true, payload_length: 0 }; diff --git a/crates/primitives/src/storage.rs b/crates/primitives-traits/src/storage.rs similarity index 97% rename from crates/primitives/src/storage.rs rename to crates/primitives-traits/src/storage.rs index ef3b2f0827d8..96e7ba15c171 100644 --- a/crates/primitives/src/storage.rs +++ b/crates/primitives-traits/src/storage.rs @@ -1,4 +1,4 @@ -use super::{B256, U256}; +use alloy_primitives::{B256, U256}; use reth_codecs::{derive_arbitrary, Compact}; use serde::{Deserialize, Serialize}; diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs similarity index 81% rename from crates/primitives/src/withdrawal.rs rename to crates/primitives-traits/src/withdrawal.rs index e4d1b37c0523..49d4e5e31269 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,8 +1,11 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; +use derive_more::{AsRef, Deref, DerefMut, From, IntoIterator}; use reth_codecs::{main_codec, Compact}; -use std::ops::{Deref, DerefMut}; + +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; /// Re-export from `alloy_eips`. #[doc(inline)] @@ -10,34 +13,49 @@ pub use alloy_eips::eip4895::Withdrawal; /// Represents a collection of Withdrawals. 
#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodableWrapper, RlpDecodableWrapper)] +#[derive( + Debug, + Clone, + PartialEq, + Eq, + Default, + Hash, + From, + AsRef, + Deref, + DerefMut, + IntoIterator, + RlpEncodableWrapper, + RlpDecodableWrapper, +)] +#[as_ref(forward)] pub struct Withdrawals(Vec); impl Withdrawals { /// Create a new Withdrawals instance. - pub fn new(withdrawals: Vec) -> Self { + pub const fn new(withdrawals: Vec) -> Self { Self(withdrawals) } /// Calculate the total size, including capacity, of the Withdrawals. #[inline] pub fn total_size(&self) -> usize { - self.capacity() * std::mem::size_of::() + self.capacity() * core::mem::size_of::() } /// Calculate a heuristic for the in-memory size of the [Withdrawals]. #[inline] pub fn size(&self) -> usize { - self.len() * std::mem::size_of::() + self.len() * core::mem::size_of::() } /// Get an iterator over the Withdrawals. - pub fn iter(&self) -> std::slice::Iter<'_, Withdrawal> { + pub fn iter(&self) -> core::slice::Iter<'_, Withdrawal> { self.0.iter() } /// Get a mutable iterator over the Withdrawals. 
- pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, Withdrawal> { + pub fn iter_mut(&mut self) -> core::slice::IterMut<'_, Withdrawal> { self.0.iter_mut() } @@ -47,47 +65,30 @@ impl Withdrawals { } } -impl IntoIterator for Withdrawals { - type Item = Withdrawal; - type IntoIter = std::vec::IntoIter; - +impl<'a> IntoIterator for &'a Withdrawals { + type Item = &'a Withdrawal; + type IntoIter = core::slice::Iter<'a, Withdrawal>; fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() + self.iter() } } -impl AsRef<[Withdrawal]> for Withdrawals { - fn as_ref(&self) -> &[Withdrawal] { - &self.0 - } -} +impl<'a> IntoIterator for &'a mut Withdrawals { + type Item = &'a mut Withdrawal; + type IntoIter = core::slice::IterMut<'a, Withdrawal>; -impl Deref for Withdrawals { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Withdrawals { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl From> for Withdrawals { - fn from(withdrawals: Vec) -> Self { - Self(withdrawals) + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() } } #[cfg(test)] mod tests { use super::*; - use crate::Address; + use alloy_primitives::Address; use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; + use proptest_arbitrary_interop::arb; /// This type is kept for compatibility tests after the codec support was added to alloy-eips /// Withdrawal type natively @@ -125,7 +126,7 @@ mod tests { proptest!( #[test] - fn test_roundtrip_withdrawal_compat(withdrawal: RethWithdrawal) { + fn test_roundtrip_withdrawal_compat(withdrawal in arb::()) { // Convert to buffer and then create alloy_access_list from buffer and // compare let mut compacted_reth_withdrawal = Vec::::new(); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 0add20276386..1e37184ab85a 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -16,18 +16,14 @@ workspace = true 
reth-primitives-traits.workspace = true reth-codecs.workspace = true reth-ethereum-forks.workspace = true -reth-network-peers.workspace = true reth-static-file-types.workspace = true -reth-trie-types.workspace = true -revm.workspace = true +reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["serde"] } +reth-chainspec = { workspace = true, optional = true } # ethereum -alloy-chains = { workspace = true, features = ["serde", "rlp"] } -alloy-consensus = { workspace = true, features = ["arbitrary", "serde"] } alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } -alloy-trie = { workspace = true, features = ["serde"] } alloy-rpc-types = { workspace = true, optional = true } alloy-genesis.workspace = true alloy-eips = { workspace = true, features = ["serde"] } @@ -43,31 +39,25 @@ c-kzg = { workspace = true, features = ["serde"], optional = true } # misc bytes.workspace = true -byteorder = "1" derive_more.workspace = true -itertools.workspace = true modular-bitfield.workspace = true once_cell.workspace = true rayon.workspace = true serde.workspace = true -serde_json.workspace = true tempfile = { workspace = true, optional = true } -thiserror.workspace = true +thiserror-no-std = { workspace = true, default-features = false } zstd = { version = "0.13", features = ["experimental"], optional = true } -roaring = "0.10.2" - -# `test-utils` feature -hash-db = { version = "~0.15", optional = true } -plain_hasher = { version = "0.2", optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } +# proptest-derive = { workspace = true, optional = true } [dev-dependencies] # eth +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } revm-primitives = { workspace = true, features = ["arbitrary"] } 
+reth-chainspec.workspace = true nybbles = { workspace = true, features = ["arbitrary"] } alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } @@ -75,6 +65,7 @@ alloy-eips = { workspace = true, features = ["arbitrary"] } assert_matches.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +proptest-arbitrary-interop.workspace = true proptest-derive.workspace = true rand.workspace = true serde_json.workspace = true @@ -82,9 +73,6 @@ test-fuzz.workspace = true toml.workspace = true triehash = "0.8" -hash-db = "~0.15" -plain_hasher = "0.2" - sucds = "0.8.1" criterion.workspace = true @@ -96,37 +84,31 @@ pprof = { workspace = true, features = [ secp256k1.workspace = true [features] -default = ["c-kzg", "zstd-codec", "alloy-compat"] +default = ["c-kzg", "zstd-codec", "alloy-compat", "std"] asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ "reth-primitives-traits/arbitrary", "revm-primitives/arbitrary", + "reth-chainspec?/arbitrary", "reth-ethereum-forks/arbitrary", "nybbles/arbitrary", "alloy-trie/arbitrary", - "alloy-chains/arbitrary", "alloy-eips/arbitrary", "dep:arbitrary", "dep:proptest", - "dep:proptest-derive", "zstd-codec", ] -c-kzg = [ - "dep:c-kzg", - "revm/c-kzg", - "revm-primitives/c-kzg", - "dep:tempfile", - "alloy-eips/kzg", -] +c-kzg = ["dep:c-kzg", "revm-primitives/c-kzg", "dep:tempfile", "alloy-eips/kzg"] zstd-codec = ["dep:zstd"] -clap = ["reth-static-file-types/clap"] optimism = [ + "reth-chainspec/optimism", "reth-codecs/optimism", "reth-ethereum-forks/optimism", - "revm/optimism", + "revm-primitives/optimism", ] -alloy-compat = ["alloy-rpc-types"] -test-utils = ["dep:plain_hasher", "dep:hash-db"] +alloy-compat = ["reth-primitives-traits/alloy-compat", "dep:alloy-rpc-types"] +std = ["thiserror-no-std/std"] +test-utils = ["reth-primitives-traits/test-utils"] [[bench]] name = "recover_ecdsa_crit" @@ -137,11 +119,3 @@ name = 
"validate_blob_tx" required-features = ["arbitrary", "c-kzg"] harness = false -[[bench]] -name = "trie_root" -required-features = ["arbitrary", "test-utils"] -harness = false - -[[bench]] -name = "integer_list" -harness = false diff --git a/crates/primitives/benches/integer_list.rs b/crates/primitives/benches/integer_list.rs deleted file mode 100644 index 6dcec9139747..000000000000 --- a/crates/primitives/benches/integer_list.rs +++ /dev/null @@ -1,251 +0,0 @@ -#![allow(missing_docs)] -use criterion::{black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; -use rand::prelude::*; - -pub fn new_pre_sorted(c: &mut Criterion) { - let mut group = c.benchmark_group("new_pre_sorted"); - - for delta in [1, 100, 1000, 10000] { - let integers_usize = generate_integers(2000, delta); - assert_eq!(integers_usize.len(), 2000); - - let integers_u64 = integers_usize.iter().map(|v| *v as u64).collect::>(); - assert_eq!(integers_u64.len(), 2000); - - group.bench_function(BenchmarkId::new("Elias-Fano", delta), |b| { - b.iter(|| elias_fano::IntegerList::new_pre_sorted(black_box(&integers_usize))); - }); - - group.bench_function(BenchmarkId::new("Roaring Bitmaps", delta), |b| { - b.iter(|| reth_primitives::IntegerList::new_pre_sorted(black_box(&integers_u64))); - }); - } -} - -pub fn rank_select(c: &mut Criterion) { - let mut group = c.benchmark_group("rank + select"); - - for delta in [1, 100, 1000, 10000] { - let integers_usize = generate_integers(2000, delta); - assert_eq!(integers_usize.len(), 2000); - - let integers_u64 = integers_usize.iter().map(|v| *v as u64).collect::>(); - assert_eq!(integers_u64.len(), 2000); - - group.bench_function(BenchmarkId::new("Elias-Fano", delta), |b| { - b.iter_batched( - || { - let (index, element) = - integers_usize.iter().enumerate().choose(&mut thread_rng()).unwrap(); - (elias_fano::IntegerList::new_pre_sorted(&integers_usize).0, index, *element) - }, - |(list, index, element)| { - let list = list.enable_rank(); - 
list.rank(element); - list.select(index); - }, - BatchSize::PerIteration, - ); - }); - - group.bench_function(BenchmarkId::new("Roaring Bitmaps", delta), |b| { - b.iter_batched( - || { - let (index, element) = - integers_u64.iter().enumerate().choose(&mut thread_rng()).unwrap(); - ( - reth_primitives::IntegerList::new_pre_sorted(&integers_u64), - index as u64, - *element, - ) - }, - |(list, index, element)| { - list.rank(element); - list.select(index); - }, - BatchSize::PerIteration, - ); - }); - } -} - -fn generate_integers(n: usize, delta: usize) -> Vec { - (0..n).fold(Vec::new(), |mut vec, _| { - vec.push(vec.last().map_or(0, |last| { - last + thread_rng().gen_range(delta - delta / 2..=delta + delta / 2) - })); - vec - }) -} - -criterion_group! { - name = benches; - config = Criterion::default(); - targets = new_pre_sorted, rank_select -} -criterion_main!(benches); - -/// Implementation from -/// adapted to work with `sucds = "0.8.1"` -#[allow(unused, unreachable_pub)] -mod elias_fano { - use std::{fmt, ops::Deref}; - use sucds::{mii_sequences::EliasFano, Serializable}; - - #[derive(Clone, PartialEq, Eq, Default)] - pub struct IntegerList(pub EliasFano); - - impl Deref for IntegerList { - type Target = EliasFano; - - fn deref(&self) -> &Self::Target { - &self.0 - } - } - - impl fmt::Debug for IntegerList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let vec: Vec = self.0.iter(0).collect(); - write!(f, "IntegerList {vec:?}") - } - } - - impl IntegerList { - /// Creates an `IntegerList` from a list of integers. `usize` is safe to use since - /// [`sucds::EliasFano`] restricts its compilation to 64bits. - /// - /// # Returns - /// - /// Returns an error if the list is empty or not pre-sorted. 
- pub fn new>(list: T) -> Result { - let mut builder = EliasFanoBuilder::new( - list.as_ref().iter().max().map_or(0, |max| max + 1), - list.as_ref().len(), - ) - .map_err(|err| EliasFanoError::InvalidInput(err.to_string()))?; - builder.extend(list.as_ref().iter().copied()); - Ok(Self(builder.build())) - } - - // Creates an IntegerList from a pre-sorted list of integers. `usize` is safe to use since - /// [`sucds::EliasFano`] restricts its compilation to 64bits. - /// - /// # Panics - /// - /// Panics if the list is empty or not pre-sorted. - pub fn new_pre_sorted>(list: T) -> Self { - Self::new(list).expect("IntegerList must be pre-sorted and non-empty.") - } - - /// Serializes a [`IntegerList`] into a sequence of bytes. - pub fn to_bytes(&self) -> Vec { - let mut vec = Vec::with_capacity(self.0.size_in_bytes()); - self.0.serialize_into(&mut vec).expect("not able to encode integer list."); - vec - } - - /// Serializes a [`IntegerList`] into a sequence of bytes. - pub fn to_mut_bytes(&self, buf: &mut B) { - let len = self.0.size_in_bytes(); - let mut vec = Vec::with_capacity(len); - self.0.serialize_into(&mut vec).unwrap(); - buf.put_slice(vec.as_slice()); - } - - /// Deserializes a sequence of bytes into a proper [`IntegerList`]. - pub fn from_bytes(data: &[u8]) -> Result { - Ok(Self( - EliasFano::deserialize_from(data).map_err(|_| EliasFanoError::FailedDeserialize)?, - )) - } - } - - macro_rules! 
impl_uint { - ($($w:tt),+) => { - $( - impl From> for IntegerList { - fn from(v: Vec<$w>) -> Self { - let v: Vec = v.iter().map(|v| *v as usize).collect(); - Self::new(v.as_slice()).expect("could not create list.") - } - } - )+ - }; - } - - impl_uint!(usize, u64, u32, u8, u16); - - impl Serialize for IntegerList { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let vec = self.0.iter(0).collect::>(); - let mut seq = serializer.serialize_seq(Some(self.len()))?; - for e in vec { - seq.serialize_element(&e)?; - } - seq.end() - } - } - - struct IntegerListVisitor; - impl<'de> Visitor<'de> for IntegerListVisitor { - type Value = IntegerList; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("a usize array") - } - - fn visit_seq(self, mut seq: E) -> Result - where - E: SeqAccess<'de>, - { - let mut list = Vec::new(); - while let Some(item) = seq.next_element()? { - list.push(item); - } - - IntegerList::new(list) - .map_err(|_| serde::de::Error::invalid_value(Unexpected::Seq, &self)) - } - } - - impl<'de> Deserialize<'de> for IntegerList { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_byte_buf(IntegerListVisitor) - } - } - - #[cfg(any(test, feature = "arbitrary"))] - use arbitrary::{Arbitrary, Unstructured}; - use serde::{ - de::{SeqAccess, Unexpected, Visitor}, - ser::SerializeSeq, - Deserialize, Deserializer, Serialize, Serializer, - }; - use sucds::mii_sequences::EliasFanoBuilder; - - #[cfg(any(test, feature = "arbitrary"))] - impl<'a> Arbitrary<'a> for IntegerList { - fn arbitrary(u: &mut Unstructured<'a>) -> Result { - let mut nums: Vec = Vec::arbitrary(u)?; - nums.sort(); - Self::new(&nums).map_err(|_| arbitrary::Error::IncorrectFormat) - } - } - - /// Primitives error type. - #[derive(Debug, thiserror::Error)] - pub enum EliasFanoError { - /// The provided input is invalid. 
- #[error("{0}")] - InvalidInput(String), - /// Failed to deserialize data into type. - #[error("failed to deserialize data into type")] - FailedDeserialize, - } -} diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index ec62353fb688..622168bb35f8 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,7 +1,7 @@ #![allow(missing_docs)] +use alloy_eips::eip4844::env_settings::EnvKzgSettings; use alloy_primitives::hex; -use c_kzg::KzgSettings; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, }; @@ -10,11 +10,9 @@ use proptest::{ strategy::ValueTree, test_runner::{RngAlgorithm, TestRng, TestRunner}, }; -use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, BlobTransactionSidecar, TxEip4844, -}; +use proptest_arbitrary_interop::arb; +use reth_primitives::{BlobTransactionSidecar, TxEip4844}; use revm_primitives::MAX_BLOB_NUMBER_PER_BLOCK; -use std::sync::Arc; // constant seed to use for the rng const SEED: [u8; 32] = hex!("1337133713371337133713371337133713371337133713371337133713371337"); @@ -22,11 +20,10 @@ const SEED: [u8; 32] = hex!("133713371337133713371337133713371337133713371337133 /// Benchmarks EIP-48444 blob validation. 
fn blob_validation(c: &mut Criterion) { let mut group = c.benchmark_group("Blob Transaction KZG validation"); - let kzg_settings = MAINNET_KZG_TRUSTED_SETUP.clone(); for num_blobs in 1..=MAX_BLOB_NUMBER_PER_BLOCK { println!("Benchmarking validation for tx with {num_blobs} blobs"); - validate_blob_tx(&mut group, "ValidateBlob", num_blobs, kzg_settings.clone()); + validate_blob_tx(&mut group, "ValidateBlob", num_blobs, EnvKzgSettings::Default); } } @@ -34,7 +31,7 @@ fn validate_blob_tx( group: &mut BenchmarkGroup<'_, WallTime>, description: &str, num_blobs: u64, - kzg_settings: Arc, + kzg_settings: EnvKzgSettings, ) { let setup = || { let config = ProptestConfig::default(); @@ -42,13 +39,13 @@ fn validate_blob_tx( let mut runner = TestRunner::new_with_rng(config, rng); // generate tx and sidecar - let mut tx = any::().new_tree(&mut runner).unwrap().current(); + let mut tx = arb::().new_tree(&mut runner).unwrap().current(); let mut blob_sidecar = - any::().new_tree(&mut runner).unwrap().current(); + arb::().new_tree(&mut runner).unwrap().current(); while blob_sidecar.blobs.len() < num_blobs as usize { let blob_sidecar_ext = - any::().new_tree(&mut runner).unwrap().current(); + arb::().new_tree(&mut runner).unwrap().current(); // extend the sidecar with the new blobs blob_sidecar.blobs.extend(blob_sidecar_ext.blobs); @@ -72,7 +69,9 @@ fn validate_blob_tx( // for now we just use the default SubPoolLimit group.bench_function(group_id, |b| { b.iter_with_setup(setup, |(tx, blob_sidecar)| { - if let Err(err) = std::hint::black_box(tx.validate_blob(&blob_sidecar, &kzg_settings)) { + if let Err(err) = + std::hint::black_box(tx.validate_blob(&blob_sidecar, kzg_settings.get())) + { println!("Validation failed: {err:?}"); } }); diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs deleted file mode 100644 index fead77789fa1..000000000000 --- a/crates/primitives/src/account.rs +++ /dev/null @@ -1,162 +0,0 @@ -use crate::revm_primitives::{Bytecode as 
RevmBytecode, Bytes}; -use byteorder::{BigEndian, ReadBytesExt}; -use bytes::Buf; -use reth_codecs::Compact; -use revm_primitives::JumpTable; -use serde::{Deserialize, Serialize}; -use std::ops::Deref; - -pub use reth_primitives_traits::Account; - -/// Bytecode for an account. -/// -/// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support. -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] -pub struct Bytecode(pub RevmBytecode); - -impl Bytecode { - /// Create new bytecode from raw bytes. - /// - /// No analysis will be performed. - pub fn new_raw(bytes: Bytes) -> Self { - Self(RevmBytecode::new_raw(bytes)) - } -} - -impl Deref for Bytecode { - type Target = RevmBytecode; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl Compact for Bytecode { - fn to_compact(self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let bytecode = &self.0.bytecode()[..]; - buf.put_u32(bytecode.len() as u32); - buf.put_slice(bytecode); - let len = match &self.0 { - RevmBytecode::LegacyRaw(_) => { - buf.put_u8(0); - 1 - } - // `1` has been removed. - RevmBytecode::LegacyAnalyzed(analyzed) => { - buf.put_u8(2); - buf.put_u64(analyzed.original_len() as u64); - let map = analyzed.jump_table().as_slice(); - buf.put_slice(map); - 1 + 8 + map.len() - } - RevmBytecode::Eof(_) => { - // buf.put_u8(3); - // TODO(EOF) - todo!("EOF") - } - }; - len + bytecode.len() + 4 - } - - // # Panics - // - // A panic will be triggered if a bytecode variant of 1 or greater than 2 is passed from the - // database. 
- fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { - let len = buf.read_u32::().expect("could not read bytecode length"); - let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); - let variant = buf.read_u8().expect("could not read bytecode variant"); - let decoded = match variant { - 0 => Self(RevmBytecode::new_raw(bytes)), - 1 => unreachable!("Junk data in database: checked Bytecode variant was removed"), - 2 => Self(unsafe { - RevmBytecode::new_analyzed( - bytes, - buf.read_u64::().unwrap() as usize, - JumpTable::from_slice(buf), - ) - }), - // TODO(EOF) - 3 => todo!("EOF"), - _ => unreachable!("Junk data in database: unknown Bytecode variant"), - }; - (decoded, &[]) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{hex_literal::hex, B256, KECCAK_EMPTY, U256}; - use revm_primitives::LegacyAnalyzedBytecode; - - #[test] - fn test_account() { - let mut buf = vec![]; - let mut acc = Account::default(); - let len = acc.to_compact(&mut buf); - assert_eq!(len, 2); - - acc.balance = U256::from(2); - let len = acc.to_compact(&mut buf); - assert_eq!(len, 3); - - acc.nonce = 2; - let len = acc.to_compact(&mut buf); - assert_eq!(len, 4); - } - - #[test] - fn test_empty_account() { - let mut acc = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None }; - // Nonce 0, balance 0, and bytecode hash set to None is considered empty. - assert!(acc.is_empty()); - - acc.bytecode_hash = Some(KECCAK_EMPTY); - // Nonce 0, balance 0, and bytecode hash set to KECCAK_EMPTY is considered empty. - assert!(acc.is_empty()); - - acc.balance = U256::from(2); - // Non-zero balance makes it non-empty. - assert!(!acc.is_empty()); - - acc.balance = U256::ZERO; - acc.nonce = 10; - // Non-zero nonce makes it non-empty. - assert!(!acc.is_empty()); - - acc.nonce = 0; - acc.bytecode_hash = Some(B256::from(U256::ZERO)); - // Non-empty bytecode hash makes it non-empty. 
- assert!(!acc.is_empty()); - } - - #[test] - fn test_bytecode() { - let mut buf = vec![]; - let bytecode = Bytecode::new_raw(Bytes::default()); - let len = bytecode.to_compact(&mut buf); - assert_eq!(len, 5); - - let mut buf = vec![]; - let bytecode = Bytecode::new_raw(Bytes::from(&hex!("ffff"))); - let len = bytecode.to_compact(&mut buf); - assert_eq!(len, 7); - - let mut buf = vec![]; - let bytecode = Bytecode(RevmBytecode::LegacyAnalyzed(LegacyAnalyzedBytecode::new( - Bytes::from(&hex!("ffff")), - 2, - JumpTable::from_slice(&[0]), - ))); - let len = bytecode.clone().to_compact(&mut buf); - assert_eq!(len, 16); - - let (decoded, remainder) = Bytecode::from_compact(&buf, len); - assert_eq!(decoded, bytecode); - assert!(remainder.is_empty()); - } -} diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 6839d713857d..d93e0b7b108d 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -1,13 +1,16 @@ //! Common conversions from alloy types. 
use crate::{ - constants::EMPTY_TRANSACTIONS, transaction::extract_chain_id, Block, Header, Signature, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, - TxLegacy, TxType, + constants::EMPTY_TRANSACTIONS, transaction::extract_chain_id, Block, Signature, Transaction, + TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, + TxType, }; use alloy_primitives::TxKind; use alloy_rlp::Error as RlpError; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + impl TryFrom for Block { type Error = alloy_rpc_types::ConversionError; @@ -61,54 +64,6 @@ impl TryFrom for Block { } } -impl TryFrom for Header { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(header: alloy_rpc_types::Header) -> Result { - use alloy_rpc_types::ConversionError; - - Ok(Self { - base_fee_per_gas: header - .base_fee_per_gas - .map(|base_fee_per_gas| { - base_fee_per_gas.try_into().map_err(ConversionError::BaseFeePerGasConversion) - }) - .transpose()?, - beneficiary: header.miner, - blob_gas_used: header - .blob_gas_used - .map(|blob_gas_used| { - blob_gas_used.try_into().map_err(ConversionError::BlobGasUsedConversion) - }) - .transpose()?, - difficulty: header.difficulty, - excess_blob_gas: header - .excess_blob_gas - .map(|excess_blob_gas| { - excess_blob_gas.try_into().map_err(ConversionError::ExcessBlobGasConversion) - }) - .transpose()?, - extra_data: header.extra_data, - gas_limit: header.gas_limit.try_into().map_err(ConversionError::GasLimitConversion)?, - gas_used: header.gas_used.try_into().map_err(ConversionError::GasUsedConversion)?, - logs_bloom: header.logs_bloom, - mix_hash: header.mix_hash.unwrap_or_default(), - nonce: u64::from_be_bytes(header.nonce.unwrap_or_default().0), - number: header.number.ok_or(ConversionError::MissingBlockNumber)?, - ommers_hash: header.uncles_hash, - parent_beacon_block_root: header.parent_beacon_block_root, - parent_hash: header.parent_hash, - receipts_root: 
header.receipts_root, - state_root: header.state_root, - timestamp: header.timestamp, - transactions_root: header.transactions_root, - withdrawals_root: header.withdrawals_root, - // TODO: requests_root: header.requests_root, - requests_root: None, - }) - } -} - impl TryFrom for Transaction { type Error = alloy_rpc_types::ConversionError; @@ -213,7 +168,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - placeholder: tx.to.map(|_| ()), + placeholder: tx.to.map(drop), to: tx.to.unwrap_or_default(), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, @@ -227,7 +182,27 @@ impl TryFrom for Transaction { })) } #[cfg(feature = "optimism")] - Some(TxType::Deposit) => todo!(), + Some(TxType::Deposit) => { + let fields = tx + .other + .deserialize_into::() + .map_err(|e| ConversionError::Custom(e.to_string()))?; + Ok(Self::Deposit(crate::transaction::TxDeposit { + source_hash: fields + .source_hash + .ok_or_else(|| ConversionError::Custom("MissingSourceHash".to_string()))?, + from: tx.from, + to: TxKind::from(tx.to), + mint: fields.mint.map(|n| n.to::()).filter(|n| *n != 0), + value: tx.value, + gas_limit: tx + .gas + .try_into() + .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, + is_system_transaction: fields.is_system_tx.unwrap_or(false), + input: tx.input, + })) + } } } } @@ -292,3 +267,113 @@ impl TryFrom for Signature { Ok(Self { r: signature.r, s: signature.s, odd_y_parity }) } } + +#[cfg(test)] +#[cfg(feature = "optimism")] +mod tests { + use super::*; + use alloy_primitives::{B256, U256}; + use alloy_rpc_types::Transaction as AlloyTransaction; + use revm_primitives::{address, Address}; + + #[test] + fn optimism_deposit_tx_conversion_no_mint() { + let input = r#"{ + "blockHash": "0xef664d656f841b5ad6a2b527b963f1eb48b97d7889d742f6cbff6950388e24cd", + "blockNumber": "0x73a78fd", + "depositReceiptVersion": "0x1", + "from": 
"0x36bde71c97b33cc4729cf772ae268934f7ab70b2", + "gas": "0xc27a8", + "gasPrice": "0x0", + "hash": "0x0bf1845c5d7a82ec92365d5027f7310793d53004f3c86aa80965c67bf7e7dc80", + "input": "0xd764ad0b000100000000000000000000000000000000000000000000000000000001cf5400000000000000000000000099c9fc46f92e8a1c0dec1b1747d010903e884be100000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007a12000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e40166a07a0000000000000000000000000994206dfe8de6ec6920ff4d779b0d950605fb53000000000000000000000000d533a949740bb3306d119cc777fa900ba034cd52000000000000000000000000ca74f404e0c7bfa35b13b511097df966d5a65597000000000000000000000000ca74f404e0c7bfa35b13b511097df966d5a65597000000000000000000000000000000000000000000000216614199391dbba2ba00000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "mint": "0x0", + "nonce": "0x74060", + "r": "0x0", + "s": "0x0", + "sourceHash": "0x074adb22f2e6ed9bdd31c52eefc1f050e5db56eb85056450bccd79a6649520b3", + "to": "0x4200000000000000000000000000000000000007", + "transactionIndex": "0x1", + "type": "0x7e", + "v": "0x0", + "value": "0x0" + }"#; + let alloy_tx: AlloyTransaction = + serde_json::from_str(input).expect("failed to deserialize"); + + let reth_tx: Transaction = alloy_tx.try_into().expect("failed to convert"); + if let Transaction::Deposit(deposit_tx) = reth_tx { + assert_eq!( + deposit_tx.source_hash, + "0x074adb22f2e6ed9bdd31c52eefc1f050e5db56eb85056450bccd79a6649520b3" + .parse::() + .unwrap() + ); + assert_eq!( + deposit_tx.from, + "0x36bde71c97b33cc4729cf772ae268934f7ab70b2".parse::
().unwrap() + ); + assert_eq!( + deposit_tx.to, + TxKind::from(address!("4200000000000000000000000000000000000007")) + ); + assert_eq!(deposit_tx.mint, None); + assert_eq!(deposit_tx.value, U256::ZERO); + assert_eq!(deposit_tx.gas_limit, 796584); + assert!(!deposit_tx.is_system_transaction); + } else { + panic!("Expected Deposit transaction"); + } + } + + #[test] + fn optimism_deposit_tx_conversion_mint() { + let input = r#"{ + "blockHash": "0x7194f63b105e93fb1a27c50d23d62e422d4185a68536c55c96284911415699b2", + "blockNumber": "0x73a82cc", + "depositReceiptVersion": "0x1", + "from": "0x36bde71c97b33cc4729cf772ae268934f7ab70b2", + "gas": "0x7812e", + "gasPrice": "0x0", + "hash": "0xf7e83886d3c6864f78e01c453ebcd57020c5795d96089e8f0e0b90a467246ddb", + "input": "0xd764ad0b000100000000000000000000000000000000000000000000000000000001cf5f00000000000000000000000099c9fc46f92e8a1c0dec1b1747d010903e884be100000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000239c2e16a5ca5900000000000000000000000000000000000000000000000000000000000000030d4000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e41635f5fd0000000000000000000000002ce910fbba65b454bbaf6a18c952a70f3bcd82990000000000000000000000002ce910fbba65b454bbaf6a18c952a70f3bcd82990000000000000000000000000000000000000000000000239c2e16a5ca590000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "mint": "0x239c2e16a5ca590000", + "nonce": "0x7406b", + "r": "0x0", + "s": "0x0", + "sourceHash": "0xe0358cd2b2686d297c5c859646a613124a874fb9d9c4a2c88636a46a65c06e48", + "to": "0x4200000000000000000000000000000000000007", + "transactionIndex": "0x1", + 
"type": "0x7e", + "v": "0x0", + "value": "0x239c2e16a5ca590000" + }"#; + let alloy_tx: AlloyTransaction = + serde_json::from_str(input).expect("failed to deserialize"); + + let reth_tx: Transaction = alloy_tx.try_into().expect("failed to convert"); + + if let Transaction::Deposit(deposit_tx) = reth_tx { + assert_eq!( + deposit_tx.source_hash, + "0xe0358cd2b2686d297c5c859646a613124a874fb9d9c4a2c88636a46a65c06e48" + .parse::() + .unwrap() + ); + assert_eq!( + deposit_tx.from, + "0x36bde71c97b33cc4729cf772ae268934f7ab70b2".parse::
().unwrap() + ); + assert_eq!( + deposit_tx.to, + TxKind::from(address!("4200000000000000000000000000000000000007")) + ); + assert_eq!(deposit_tx.mint, Some(656890000000000000000)); + assert_eq!(deposit_tx.value, U256::from(0x239c2e16a5ca590000_u128)); + assert_eq!(deposit_tx.gas_limit, 491822); + assert!(!deposit_tx.is_system_transaction); + } else { + panic!("Expected Deposit transaction"); + } + } +} diff --git a/crates/primitives/src/basefee.rs b/crates/primitives/src/basefee.rs index b886b41e9299..aa52b02a035e 100644 --- a/crates/primitives/src/basefee.rs +++ b/crates/primitives/src/basefee.rs @@ -3,109 +3,3 @@ // re-export #[doc(inline)] pub use alloy_eips::eip1559::calc_next_block_base_fee; - -#[cfg(test)] -mod tests { - use super::*; - - #[cfg(feature = "optimism")] - use crate::chain::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS}; - - #[test] - fn calculate_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1125000000, 1083333333, 1053571428, 1179939062, 1116028649, 918084097, 1063811730, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - crate::BaseFeeParams::ethereum(), - ) as u64 - ); - } - } - - #[cfg(feature = "optimism")] - #[test] - fn calculate_optimism_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 
14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1100000048, 1080000000, 1065714297, 1167067046, 1128881311, 1028254188, 1098203452, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - OP_BASE_FEE_PARAMS, - ) as u64 - ); - } - } - - #[cfg(feature = "optimism")] - #[test] - fn calculate_optimism_sepolia_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1180000000, 1146666666, 1122857142, 1244299375, 1189416692, 1028254188, 1144836295, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - OP_SEPOLIA_BASE_FEE_PARAMS, - ) as u64 - ); - } - } -} diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index affab87c8b7b..774eb979a15e 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,17 +1,22 @@ use crate::{ - Address, Bytes, GotExpected, Header, Requests, SealedHeader, TransactionSigned, + Address, Bytes, GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, B256, }; +pub use alloy_eips::eip1898::{ + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, +}; use alloy_rlp::{RlpDecodable, RlpEncodable}; +use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] -use proptest::prelude::{any, prop_compose}; -use reth_codecs::derive_arbitrary; +use 
proptest::prelude::prop_compose; +use reth_codecs::{add_arbitrary_tests, derive_arbitrary}; +#[cfg(any(test, feature = "arbitrary"))] +pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; +use reth_primitives_traits::Requests; use serde::{Deserialize, Serialize}; -use std::ops::Deref; -pub use alloy_eips::eip1898::{ - BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, -}; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; // HACK(onbjerg): we need this to always set `requests` to `None` since we might otherwise generate // a block with `None` withdrawals and `Some` requests, in which case we end up trying to decode the @@ -26,37 +31,22 @@ prop_compose! { /// Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. -#[derive_arbitrary(rlp, 25)] +#[add_arbitrary_tests(rlp, 25)] #[derive( - Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable, + Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, RlpEncodable, RlpDecodable, )] #[rlp(trailing)] pub struct Block { /// Block header. - #[cfg_attr(any(test, feature = "arbitrary"), proptest(strategy = "valid_header_strategy()"))] + #[deref] pub header: Header, /// Transactions in this block. - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=100)" - ) - )] pub body: Vec, /// Ommers/uncles header. - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest(strategy = "proptest::collection::vec(valid_header_strategy(), 0..=2)") - )] pub ommers: Vec
, /// Block withdrawals. - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest(strategy = "proptest::option::of(proptest::arbitrary::any::())") - )] pub withdrawals: Option, /// Block requests. - #[cfg_attr(any(test, feature = "arbitrary"), proptest(strategy = "empty_requests_strategy()"))] pub requests: Option, } @@ -175,23 +165,40 @@ impl Block { pub fn size(&self) -> usize { self.header.size() + // take into account capacity - self.body.iter().map(TransactionSigned::size).sum::() + self.body.capacity() * std::mem::size_of::() + - self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * std::mem::size_of::
() + - self.withdrawals.as_ref().map_or(std::mem::size_of::>(), Withdrawals::total_size) + self.body.iter().map(TransactionSigned::size).sum::() + self.body.capacity() * core::mem::size_of::() + + self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * core::mem::size_of::
() + + self.withdrawals.as_ref().map_or(core::mem::size_of::>(), Withdrawals::total_size) } } -impl Deref for Block { - type Target = Header; - fn deref(&self) -> &Self::Target { - &self.header +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for Block { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + // first generate up to 100 txs + let transactions = (0..100) + .map(|_| TransactionSigned::arbitrary(u)) + .collect::>>()?; + + // then generate up to 2 ommers + let ommers = (0..2).map(|_| Header::arbitrary(u)).collect::>>()?; + + Ok(Self { + header: u.arbitrary()?, + body: transactions, + ommers, + // for now just generate empty requests, see HACK above + requests: u.arbitrary()?, + withdrawals: u.arbitrary()?, + }) } } /// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Deref, DerefMut)] pub struct BlockWithSenders { /// Block + #[deref] + #[deref_mut] pub block: Block, /// List of senders that match the transactions in the block pub senders: Vec
, @@ -253,53 +260,36 @@ impl BlockWithSenders { } } -impl Deref for BlockWithSenders { - type Target = Block; - fn deref(&self) -> &Self::Target { - &self.block - } -} - -#[cfg(any(test, feature = "test-utils"))] -impl std::ops::DerefMut for BlockWithSenders { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.block - } -} - /// Sealed Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. -#[derive_arbitrary(rlp)] +#[derive_arbitrary(rlp 32)] #[derive( - Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable, + Debug, + Clone, + PartialEq, + Eq, + Default, + Serialize, + Deserialize, + Deref, + DerefMut, + RlpEncodable, + RlpDecodable, )] #[rlp(trailing)] pub struct SealedBlock { /// Locked block header. + #[deref] + #[deref_mut] pub header: SealedHeader, /// Transactions with signatures. - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=100)" - ) - )] pub body: Vec, /// Ommer/uncle headers - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest(strategy = "proptest::collection::vec(valid_header_strategy(), 0..=2)") - )] pub ommers: Vec
, /// Block withdrawals. - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest(strategy = "proptest::option::of(proptest::arbitrary::any::())") - )] pub withdrawals: Option, /// Block requests. - #[cfg_attr(any(test, feature = "arbitrary"), proptest(strategy = "empty_requests_strategy()"))] pub requests: Option, } @@ -381,6 +371,43 @@ impl SealedBlock { } } + /// Transform into a [`SealedBlockWithSenders`]. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + #[track_caller] + pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders { + self.try_with_senders_unchecked(senders).expect("stored block is valid") + } + + /// Transform into a [`SealedBlockWithSenders`] using the given senders. + /// + /// If the number of senders does not match the number of transactions in the block, this falls + /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. + /// See also [`TransactionSigned::recover_signer_unchecked`] + /// + /// Returns an error if a signature is invalid. + #[track_caller] + pub fn try_with_senders_unchecked( + self, + senders: Vec
, + ) -> Result { + let senders = if self.body.len() == senders.len() { + senders + } else { + let Some(senders) = + TransactionSigned::recover_signers_unchecked(&self.body, self.body.len()) + else { + return Err(self) + }; + senders + }; + + Ok(SealedBlockWithSenders { block: self, senders }) + } + /// Unseal the block pub fn unseal(self) -> Block { Block { @@ -397,9 +424,9 @@ impl SealedBlock { pub fn size(&self) -> usize { self.header.size() + // take into account capacity - self.body.iter().map(TransactionSigned::size).sum::() + self.body.capacity() * std::mem::size_of::() + - self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * std::mem::size_of::
() + - self.withdrawals.as_ref().map_or(std::mem::size_of::>(), Withdrawals::total_size) + self.body.iter().map(TransactionSigned::size).sum::() + self.body.capacity() * core::mem::size_of::() + + self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * core::mem::size_of::
() + + self.withdrawals.as_ref().map_or(core::mem::size_of::>(), Withdrawals::total_size) } /// Calculates the total gas used by blob transactions in the sealed block. @@ -450,24 +477,12 @@ impl From for Block { } } -impl Deref for SealedBlock { - type Target = SealedHeader; - fn deref(&self) -> &Self::Target { - &self.header - } -} - -#[cfg(any(test, feature = "test-utils"))] -impl std::ops::DerefMut for SealedBlock { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.header - } -} - /// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] pub struct SealedBlockWithSenders { /// Sealed block + #[deref] + #[deref_mut] pub block: SealedBlock, /// List of senders that match transactions from block. pub senders: Vec
, @@ -521,47 +536,22 @@ impl SealedBlockWithSenders { } } -impl Deref for SealedBlockWithSenders { - type Target = SealedBlock; - fn deref(&self) -> &Self::Target { - &self.block - } -} - -#[cfg(any(test, feature = "test-utils"))] -impl std::ops::DerefMut for SealedBlockWithSenders { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.block - } -} - /// A response to `GetBlockBodies`, containing bodies if any bodies were found. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. -#[derive_arbitrary(rlp, 10)] +#[add_arbitrary_tests(rlp, 10)] #[derive( Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable, )] #[rlp(trailing)] pub struct BlockBody { /// Transactions in the block - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=100)" - ) - )] pub transactions: Vec, /// Uncle headers for the given block - #[cfg_attr( - any(test, feature = "arbitrary"), - proptest(strategy = "proptest::collection::vec(valid_header_strategy(), 0..=2)") - )] pub ommers: Vec
, /// Withdrawals in the block. pub withdrawals: Option, /// Requests in the block. - #[cfg_attr(any(test, feature = "arbitrary"), proptest(strategy = "empty_requests_strategy()"))] pub requests: Option, } @@ -604,12 +594,12 @@ impl BlockBody { #[inline] pub fn size(&self) -> usize { self.transactions.iter().map(TransactionSigned::size).sum::() + - self.transactions.capacity() * std::mem::size_of::() + + self.transactions.capacity() * core::mem::size_of::() + self.ommers.iter().map(Header::size).sum::() + - self.ommers.capacity() * std::mem::size_of::
() + + self.ommers.capacity() * core::mem::size_of::
() + self.withdrawals .as_ref() - .map_or(std::mem::size_of::>(), Withdrawals::total_size) + .map_or(core::mem::size_of::>(), Withdrawals::total_size) } } @@ -624,66 +614,19 @@ impl From for BlockBody { } } -/// Generates a header which is valid __with respect to past and future forks__. This means, for -/// example, that if the withdrawals root is present, the base fee per gas is also present. -/// -/// If blob gas used were present, then the excess blob gas and parent beacon block root are also -/// present. In this example, the withdrawals root would also be present. -/// -/// This __does not, and should not guarantee__ that the header is valid with respect to __anything -/// else__. -#[cfg(any(test, feature = "arbitrary"))] -pub fn generate_valid_header( - mut header: Header, - eip_4844_active: bool, - blob_gas_used: u64, - excess_blob_gas: u64, - parent_beacon_block_root: B256, -) -> Header { - // EIP-1559 logic - if header.base_fee_per_gas.is_none() { - // If EIP-1559 is not active, clear related fields - header.withdrawals_root = None; - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if header.withdrawals_root.is_none() { - // If EIP-4895 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if eip_4844_active { - // Set fields based on EIP-4844 being active - header.blob_gas_used = Some(blob_gas_used); - header.excess_blob_gas = Some(excess_blob_gas); - header.parent_beacon_block_root = Some(parent_beacon_block_root); - } else { - // If EIP-4844 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } - - // todo(onbjerg): adjust this for eip-7589 - header.requests_root = None; - - header -} - #[cfg(any(test, feature = "arbitrary"))] -prop_compose! 
{ - /// Generates a proptest strategy for constructing an instance of a header which is valid __with - /// respect to past and future forks__. - /// - /// See docs for [generate_valid_header] for more information. - pub fn valid_header_strategy()( - header in any::
(), - eip_4844_active in any::(), - blob_gas_used in any::(), - excess_blob_gas in any::(), - parent_beacon_block_root in any::() - ) -> Header { - generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root) +impl<'a> arbitrary::Arbitrary<'a> for BlockBody { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + // first generate up to 100 txs + let transactions = (0..100) + .map(|_| TransactionSigned::arbitrary(u)) + .collect::>>()?; + + // then generate up to 2 ommers + let ommers = (0..2).map(|_| Header::arbitrary(u)).collect::>>()?; + + // for now just generate empty requests, see HACK above + Ok(Self { transactions, ommers, requests: None, withdrawals: u.arbitrary()? }) } } diff --git a/crates/primitives/src/compression/mod.rs b/crates/primitives/src/compression/mod.rs index c672f25d3742..4b039b5a627f 100644 --- a/crates/primitives/src/compression/mod.rs +++ b/crates/primitives/src/compression/mod.rs @@ -1,6 +1,9 @@ use std::{cell::RefCell, thread_local}; use zstd::bulk::{Compressor, Decompressor}; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + /// Compression/Decompression dictionary for `Receipt`. pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("./receipt_dictionary.bin"); /// Compression/Decompression dictionary for `Transaction`. 
diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index 3379a9e48ed8..48a3aebb3c32 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -11,19 +11,7 @@ pub use alloy_eips::eip4844::{ #[cfg(feature = "c-kzg")] mod trusted_setup { use crate::kzg::KzgSettings; - use once_cell::sync::Lazy; - use std::{io::Write, sync::Arc}; - - /// KZG trusted setup - pub static MAINNET_KZG_TRUSTED_SETUP: Lazy> = Lazy::new(|| { - Arc::new( - c_kzg::KzgSettings::load_trusted_setup( - &revm_primitives::kzg::G1_POINTS.0, - &revm_primitives::kzg::G2_POINTS.0, - ) - .expect("failed to load trusted setup"), - ) - }); + use std::io::Write; /// Loads the trusted setup parameters from the given bytes and returns the [`KzgSettings`]. /// @@ -38,7 +26,7 @@ mod trusted_setup { } /// Error type for loading the trusted setup. - #[derive(Debug, thiserror::Error)] + #[derive(Debug, thiserror_no_std::Error)] pub enum LoadKzgSettingsError { /// Failed to create temp file to store bytes for loading [`KzgSettings`] via /// [`KzgSettings::load_trusted_setup_file`]. @@ -48,14 +36,4 @@ mod trusted_setup { #[error("KZG error: {0:?}")] KzgError(#[from] c_kzg::Error), } - - #[cfg(test)] - mod tests { - use super::*; - - #[test] - fn ensure_load_kzg_settings() { - let _settings = Arc::clone(&MAINNET_KZG_TRUSTED_SETUP); - } - } } diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index 2da0554082d8..fd1dc1586248 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -1,218 +1,6 @@ //! 
Ethereum protocol-related constants -use crate::{ - chain::DepositContract, - revm_primitives::{address, b256}, - B256, U256, -}; -use std::time::Duration; - -#[cfg(feature = "optimism")] -use crate::chain::BaseFeeParams; +pub use reth_primitives_traits::constants::*; /// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants. pub mod eip4844; - -/// The client version: `reth/v{major}.{minor}.{patch}` -pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); - -/// The first four bytes of the call data for a function call specifies the function to be called. -pub const SELECTOR_LEN: usize = 4; - -/// Maximum extra data size in a block after genesis -pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 32; - -/// An EPOCH is a series of 32 slots. -pub const EPOCH_SLOTS: u64 = 32; - -/// The duration of a slot in seconds. -/// -/// This is the time period of 12 seconds in which a randomly chosen validator has time to propose a -/// block. -/// WVM: 2s -pub const SLOT_DURATION: Duration = Duration::from_secs(2); - -/// An EPOCH is a series of 32 slots (~6.4min). -pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS); - -/// The default block nonce in the beacon consensus -pub const BEACON_NONCE: u64 = 0u64; - -/// The default Ethereum block gas limit. -// TODO: This should be a chain spec parameter. -/// See . -/// WVM: 300kk -pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 300_000_000; - -/// The minimum tx fee below which the txpool will reject the transaction. -/// -/// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559 -/// parameters. `BASE_FEE_MAX_CHANGE_DENOMINATOR` -/// is `8`, or 12.5%. Once the base fee has dropped to `7` WEI it cannot decrease further because -/// 12.5% of 7 is less than 1. -/// -/// Note that min base fee under different 1559 parameterizations may differ, but there's no -/// significant harm in leaving this setting as is. 
-pub const MIN_PROTOCOL_BASE_FEE: u64 = 7; - -/// Same as [`MIN_PROTOCOL_BASE_FEE`] but as a U256. -pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]); - -/// Initial base fee as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000; - -/// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8; - -/// Elasticity multiplier as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 2; - -/// Minimum gas limit allowed for transactions. -pub const MINIMUM_GAS_LIMIT: u64 = 5000; - -/// Deposit contract address -pub const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), - 11052984, - b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), -); - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -#[cfg(feature = "optimism")] -pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism Canyon -/// hardfork. -#[cfg(feature = "optimism")] -pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. 
-#[cfg(feature = "optimism")] -pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -#[cfg(feature = "optimism")] -pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism Canyon -/// hardfork. -#[cfg(feature = "optimism")] -pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -#[cfg(feature = "optimism")] -pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10; - -/// Get the base fee parameters for Optimism Sepolia. -#[cfg(feature = "optimism")] -pub const OP_SEPOLIA_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { - max_change_denominator: OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, - elasticity_multiplier: OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, -}; - -/// Get the base fee parameters for Optimism Sepolia (post Canyon). -#[cfg(feature = "optimism")] -pub const OP_SEPOLIA_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { - max_change_denominator: OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, - elasticity_multiplier: OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, -}; - -/// Get the base fee parameters for Optimism Mainnet. -#[cfg(feature = "optimism")] -pub const OP_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { - max_change_denominator: OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, - elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, -}; - -/// Get the base fee parameters for Optimism Mainnet (post Canyon). 
-#[cfg(feature = "optimism")] -pub const OP_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { - max_change_denominator: OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, - elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, -}; - -/// Multiplier for converting gwei to wei. -pub const GWEI_TO_WEI: u64 = 1_000_000_000; - -/// Multiplier for converting finney (milliether) to wei. -pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000; - -/// Multiplier for converting ether to wei. -pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000; - -/// Multiplier for converting mgas to gas. -pub const MGAS_TO_GAS: u64 = 1_000_000u64; - -/// The Ethereum mainnet genesis hash. -pub const MAINNET_GENESIS_HASH: B256 = - b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); - -/// Goerli genesis hash. -pub const GOERLI_GENESIS_HASH: B256 = - b256!("bf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a"); - -/// Sepolia genesis hash. -pub const SEPOLIA_GENESIS_HASH: B256 = - b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"); - -/// Holesky genesis hash. -pub const HOLESKY_GENESIS_HASH: B256 = - b256!("ff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d"); - -/// Testnet genesis hash. -pub const DEV_GENESIS_HASH: B256 = - b256!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c"); - -/// Keccak256 over empty array. -pub const KECCAK_EMPTY: B256 = - b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); - -/// Ommer root of empty list. -pub const EMPTY_OMMER_ROOT_HASH: B256 = - b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"); - -/// Root hash of an empty trie. -pub const EMPTY_ROOT_HASH: B256 = - b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"); - -/// Transactions root of empty receipts set. 
-pub const EMPTY_RECEIPTS: B256 = EMPTY_ROOT_HASH; - -/// Transactions root of empty transactions set. -pub const EMPTY_TRANSACTIONS: B256 = EMPTY_ROOT_HASH; - -/// Withdrawals root of empty withdrawals set. -pub const EMPTY_WITHDRAWALS: B256 = EMPTY_ROOT_HASH; - -/// The number of blocks to unwind during a reorg that already became a part of canonical chain. -/// -/// In reality, the node can end up in this particular situation very rarely. It would happen only -/// if the node process is abruptly terminated during ongoing reorg and doesn't boot back up for -/// long period of time. -/// -/// Unwind depth of `3` blocks significantly reduces the chance that the reorged block is kept in -/// the database. -pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3; - -/// Max seconds from current time allowed for blocks, before they're considered future blocks. -/// -/// This is only used when checking whether or not the timestamp for pre-merge blocks is in the -/// future. -/// -/// See: -/// -pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn min_protocol_sanity() { - assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::(), MIN_PROTOCOL_BASE_FEE); - } -} diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index d9cb4d326e44..29154d591e8e 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -14,69 +14,46 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc; -mod account; #[cfg(feature = "alloy-compat")] mod alloy_compat; pub mod basefee; mod block; -mod chain; #[cfg(feature = "zstd-codec")] mod compression; pub mod 
constants; pub mod eip4844; -mod error; pub mod genesis; -mod header; -mod integer_list; -mod log; -mod net; pub mod proofs; mod receipt; -mod request; -/// Helpers for working with revm -pub mod revm; pub use reth_static_file_types as static_file; -mod storage; pub mod transaction; -mod withdrawal; -pub use account::{Account, Bytecode}; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; pub use block::{ Block, BlockBody, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, BlockWithSenders, ForkBlock, RpcBlockHash, SealedBlock, SealedBlockWithSenders, }; -pub use chain::{ - AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainKind, ChainSpec, - ChainSpecBuilder, DepositContract, DisplayHardforks, ForkBaseFeeParams, ForkCondition, - NamedChain, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, -}; #[cfg(feature = "zstd-codec")] pub use compression::*; pub use constants::{ - DEV_GENESIS_HASH, EMPTY_OMMER_ROOT_HASH, GOERLI_GENESIS_HASH, HOLESKY_GENESIS_HASH, - KECCAK_EMPTY, MAINNET_DEPOSIT_CONTRACT, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + DEV_GENESIS_HASH, EMPTY_OMMER_ROOT_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, + MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, }; -pub use error::{GotExpected, GotExpectedBoxed}; pub use genesis::{ChainConfig, Genesis, GenesisAccount}; -pub use header::{Header, HeaderValidationError, HeadersDirection, SealedHeader}; -pub use integer_list::IntegerList; -pub use log::{logs_bloom, Log}; -pub use net::{ - goerli_nodes, holesky_nodes, mainnet_nodes, parse_nodes, sepolia_nodes, NodeRecord, - NodeRecordParseError, TrustedPeer, GOERLI_BOOTNODES, HOLESKY_BOOTNODES, MAINNET_BOOTNODES, - SEPOLIA_BOOTNODES, -}; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; -pub use request::Requests; +pub use reth_primitives_traits::{ + logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, + 
LogData, Request, Requests, SealedHeader, StorageEntry, Withdrawal, Withdrawals, +}; pub use static_file::StaticFileSegment; -pub use storage::StorageEntry; pub use transaction::{ BlobTransaction, BlobTransactionSidecar, FromRecoveredPooledTransaction, @@ -95,11 +72,8 @@ pub use transaction::{ LEGACY_TX_TYPE_ID, }; -pub use withdrawal::{Withdrawal, Withdrawals}; - // Re-exports pub use self::ruint::UintTryTo; -pub use alloy_consensus::Request; pub use alloy_primitives::{ self, address, b256, bloom, bytes, bytes::{Buf, BufMut, BytesMut}, @@ -136,14 +110,8 @@ pub use c_kzg as kzg; /// Optimism specific re-exports #[cfg(feature = "optimism")] mod optimism { - pub use crate::{ - chain::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}, - net::{ - base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, OP_BOOTNODES, - OP_TESTNET_BOOTNODES, - }, - transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}, - }; + pub use crate::transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}; + pub use reth_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; } #[cfg(feature = "optimism")] diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs deleted file mode 100644 index 41fc6dfe691a..000000000000 --- a/crates/primitives/src/net.rs +++ /dev/null @@ -1,247 +0,0 @@ -pub use reth_network_peers::{NodeRecord, NodeRecordParseError, TrustedPeer}; - -// Ethereum bootnodes come from -// OP bootnodes come from - -/// Ethereum Foundation Go Bootnodes -pub static MAINNET_BOOTNODES : [&str; 4] = [ - "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", // bootnode-aws-ap-southeast-1-001 - "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", // bootnode-aws-us-east-1-001 - 
"enode://2b252ab6a1d0f971d9722cb839a42cb81db019ba44c08754628ab4a823487071b5695317c8ccd085219c3a03af063495b2f1da8d18218da2d6a82981b45e6ffc@65.108.70.101:30303", // bootnode-hetzner-hel - "enode://4aeb4ab6c14b23e2c4cfdce879c04b0748a20d8e9b59e25ded2a08143e265c6c25936e74cbc8e641e3312ca288673d91f2f93f8e277de3cfa444ecdaaf982052@157.90.35.166:30303", // bootnode-hetzner-fsn -]; - -/// Ethereum Foundation Sepolia Bootnodes -pub static SEPOLIA_BOOTNODES : [&str; 5] = [ - "enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", // sepolia-bootnode-1-nyc3 - "enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", // sepolia-bootnode-1-sfo3 - "enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", // sepolia-bootnode-1-syd1 - "enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", // sepolia-bootnode-1-blr1 - "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3 -]; - -/// Görli Bootnodes -pub static GOERLI_BOOTNODES : [&str; 7] = [ - // Upstream bootnodes - "enode://011f758e6552d105183b1761c5e2dea0111bc20fd5f6422bc7f91e0fabbec9a6595caf6239b37feb773dddd3f87240d99d859431891e4a642cf2a0a9e6cbb98a@51.141.78.53:30303", - "enode://176b9417f511d05b6b2cf3e34b756cf0a7096b3094572a8f6ef4cdcb9d1f9d00683bf0f83347eebdf3b81c3521c2332086d9592802230bf528eaf606a1d9677b@13.93.54.137:30303", - "enode://46add44b9f13965f7b9875ac6b85f016f341012d84f975377573800a863526f4da19ae2c620ec73d11591fa9510e992ecc03ad0751f53cc02f7c7ed6d55c7291@94.237.54.114:30313", - 
"enode://b5948a2d3e9d486c4d75bf32713221c2bd6cf86463302339299bd227dc2e276cd5a1c7ca4f43a0e9122fe9af884efed563bd2a1fd28661f3b5f5ad7bf1de5949@18.218.250.66:30303", - - // Ethereum Foundation bootnode - "enode://a61215641fb8714a373c80edbfa0ea8878243193f57c96eeb44d0bc019ef295abd4e044fd619bfc4c59731a73fb79afe84e9ab6da0c743ceb479cbb6d263fa91@3.11.147.67:30303", - - // Goerli Initiative bootnodes - "enode://d4f764a48ec2a8ecf883735776fdefe0a3949eb0ca476bd7bc8d0954a9defe8fea15ae5da7d40b5d2d59ce9524a99daedadf6da6283fca492cc80b53689fb3b3@46.4.99.122:32109", - "enode://d2b720352e8216c9efc470091aa91ddafc53e222b32780f505c817ceef69e01d5b0b0797b69db254c586f493872352f5a022b4d8479a00fc92ec55f9ad46a27e@88.99.70.182:30303", -]; - -/// Ethereum Foundation Holesky Bootnodes -pub static HOLESKY_BOOTNODES : [&str; 2] = [ - "enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303", - "enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", -]; - -#[cfg(feature = "optimism")] -/// OP stack mainnet boot nodes. 
-pub static OP_BOOTNODES: &[&str] = &[ - // OP Labs - "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", - "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", - "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", - // Base - "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", - "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", - "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", - "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", - "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301" -]; - -#[cfg(feature = "optimism")] -/// OP stack testnet boot nodes. 
-pub static OP_TESTNET_BOOTNODES: &[&str] = &[ - // OP Labs - "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", - "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", - "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", - // Base - "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", - "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", -]; - -/// Returns parsed mainnet nodes -pub fn mainnet_nodes() -> Vec { - parse_nodes(&MAINNET_BOOTNODES[..]) -} - -/// Returns parsed goerli nodes -pub fn goerli_nodes() -> Vec { - parse_nodes(&GOERLI_BOOTNODES[..]) -} - -/// Returns parsed sepolia nodes -pub fn sepolia_nodes() -> Vec { - parse_nodes(&SEPOLIA_BOOTNODES[..]) -} - -/// Returns parsed holesky nodes -pub fn holesky_nodes() -> Vec { - parse_nodes(&HOLESKY_BOOTNODES[..]) -} - -#[cfg(feature = "optimism")] -/// Returns parsed op-stack mainnet nodes -pub fn op_nodes() -> Vec { - parse_nodes(OP_BOOTNODES) -} - -#[cfg(feature = "optimism")] -/// Returns parsed op-stack testnet nodes -pub fn op_testnet_nodes() -> Vec { - parse_nodes(OP_TESTNET_BOOTNODES) -} - -#[cfg(feature = "optimism")] -/// Returns parsed op-stack base mainnet nodes -pub fn base_nodes() -> Vec { - parse_nodes(OP_BOOTNODES) -} - -#[cfg(feature = "optimism")] -/// Returns parsed op-stack base testnet nodes -pub fn base_testnet_nodes() -> Vec { - parse_nodes(OP_TESTNET_BOOTNODES) -} - -/// Parses all the nodes -pub fn parse_nodes(nodes: impl IntoIterator>) -> Vec { - 
nodes.into_iter().map(|s| s.as_ref().parse().unwrap()).collect() -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_rlp::Decodable; - use rand::{thread_rng, Rng, RngCore}; - use std::net::{IpAddr, Ipv4Addr}; - - #[test] - fn test_mapped_ipv6() { - let mut rng = thread_rng(); - - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - let v6 = v4.to_ipv6_mapped(); - - let record = NodeRecord { - address: v6.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_mapped_ipv4() { - let mut rng = thread_rng(); - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - - let record = NodeRecord { - address: v4.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(!record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_noderecord_codec_ipv4() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 4]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V4(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_noderecord_codec_ipv6() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 16]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V6(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_url_parse() { - let url = 
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(node, NodeRecord { - address: IpAddr::V4([10,3,58,6].into()), - tcp_port: 30303, - udp_port: 30301, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }) - } - - #[test] - fn test_node_display() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_display_discport() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_serialize() { - let node = NodeRecord{ - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }; - let ser = serde_json::to_string::(&node).expect("couldn't serialize"); - assert_eq!(ser, "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"") - } - - #[test] - fn test_node_deserialize() { - let url = "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\""; - let node: NodeRecord = serde_json::from_str(url).expect("couldn't 
deserialize"); - assert_eq!(node, NodeRecord{ - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }) - } -} diff --git a/crates/primitives/src/proofs/mod.rs b/crates/primitives/src/proofs.rs similarity index 86% rename from crates/primitives/src/proofs/mod.rs rename to crates/primitives/src/proofs.rs index 5555ae603d69..10a8a0a6c8fc 100644 --- a/crates/primitives/src/proofs/mod.rs +++ b/crates/primitives/src/proofs.rs @@ -1,58 +1,14 @@ //! Helper function for calculating Merkle proofs and hashes. use crate::{ - constants::EMPTY_OMMER_ROOT_HASH, keccak256, Address, Header, Receipt, ReceiptWithBloom, - ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, B256, U256, + constants::EMPTY_OMMER_ROOT_HASH, keccak256, Header, Receipt, ReceiptWithBloom, + ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, B256, }; -use reth_trie_types::{hash_builder::HashBuilder, Nibbles}; - -mod types; -pub use types::{AccountProof, StorageProof}; -mod traits; -pub use traits::IntoTrieAccount; - use alloy_eips::eip7685::Encodable7685; -use alloy_rlp::Encodable; -use itertools::Itertools; - -/// Adjust the index of an item for rlp encoding. -pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize { - if i > 0x7f { - i - } else if i == 0x7f || i + 1 == len { - 0 - } else { - i + 1 - } -} - -/// Compute a trie root of the collection of rlp encodable items. -pub fn ordered_trie_root(items: &[T]) -> B256 { - ordered_trie_root_with_encoder(items, |item, buf| item.encode(buf)) -} - -/// Compute a trie root of the collection of items with a custom encoder. 
-pub fn ordered_trie_root_with_encoder(items: &[T], mut encode: F) -> B256 -where - F: FnMut(&T, &mut Vec), -{ - let mut value_buffer = Vec::new(); - - let mut hb = HashBuilder::default(); - let items_len = items.len(); - for i in 0..items_len { - let index = adjust_index_for_rlp(i, items_len); - - let index_buffer = alloy_rlp::encode_fixed_size(&index); - - value_buffer.clear(); - encode(&items[index], &mut value_buffer); +use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; - hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer); - } - - hb.root() -} +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; /// Calculate a transaction root. /// @@ -85,7 +41,7 @@ pub fn calculate_requests_root(requests: &[Request]) -> B256 { #[cfg(feature = "optimism")] pub fn calculate_receipt_root_optimism( receipts: &[ReceiptWithBloom], - chain_spec: &crate::ChainSpec, + chain_spec: &reth_chainspec::ChainSpec, timestamp: u64, ) -> B256 { // There is a minor bug in op-geth and op-erigon where in the Regolith hardfork, @@ -93,8 +49,9 @@ pub fn calculate_receipt_root_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. 
- if chain_spec.is_fork_active_at_timestamp(crate::Hardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(crate::Hardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Regolith, timestamp) && + !chain_spec + .is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Canyon, timestamp) { let receipts = receipts .iter() @@ -133,7 +90,7 @@ pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { #[cfg(feature = "optimism")] pub fn calculate_receipt_root_no_memo_optimism( receipts: &[&Receipt], - chain_spec: &crate::ChainSpec, + chain_spec: &reth_chainspec::ChainSpec, timestamp: u64, ) -> B256 { // There is a minor bug in op-geth and op-erigon where in the Regolith hardfork, @@ -141,8 +98,9 @@ pub fn calculate_receipt_root_no_memo_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. - if chain_spec.is_fork_active_at_timestamp(crate::Hardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(crate::Hardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Regolith, timestamp) && + !chain_spec + .is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Canyon, timestamp) { let receipts = receipts .iter() @@ -175,109 +133,15 @@ pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { keccak256(ommers_rlp) } -/// Hashes and sorts account keys, then proceeds to calculating the root hash of the state -/// represented as MPT. -/// See [`state_root_unsorted`] for more info. 
-pub fn state_root_ref_unhashed<'a, A: IntoTrieAccount + Clone + 'a>( - state: impl IntoIterator, -) -> B256 { - state_root_unsorted( - state.into_iter().map(|(address, account)| (keccak256(address), account.clone())), - ) -} - -/// Hashes and sorts account keys, then proceeds to calculating the root hash of the state -/// represented as MPT. -/// See [`state_root_unsorted`] for more info. -pub fn state_root_unhashed( - state: impl IntoIterator, -) -> B256 { - state_root_unsorted(state.into_iter().map(|(address, account)| (keccak256(address), account))) -} - -/// Sorts the hashed account keys and calculates the root hash of the state represented as MPT. -/// See [`state_root`] for more info. -pub fn state_root_unsorted(state: impl IntoIterator) -> B256 { - state_root(state.into_iter().sorted_by_key(|(key, _)| *key)) -} - -/// Calculates the root hash of the state represented as MPT. -/// Corresponds to [geth's `deriveHash`](https://github.com/ethereum/go-ethereum/blob/6c149fd4ad063f7c24d726a73bc0546badd1bc73/core/genesis.go#L119). -/// -/// # Panics -/// -/// If the items are not in sorted order. -pub fn state_root(state: impl IntoIterator) -> B256 { - let mut hb = HashBuilder::default(); - let mut account_rlp_buf = Vec::new(); - for (hashed_key, account) in state { - account_rlp_buf.clear(); - account.to_trie_account().encode(&mut account_rlp_buf); - hb.add_leaf(Nibbles::unpack(hashed_key), &account_rlp_buf); - } - hb.root() -} - -/// Hashes storage keys, sorts them and them calculates the root hash of the storage trie. -/// See [`storage_root_unsorted`] for more info. -pub fn storage_root_unhashed(storage: impl IntoIterator) -> B256 { - storage_root_unsorted(storage.into_iter().map(|(slot, value)| (keccak256(slot), value))) -} - -/// Sorts and calculates the root hash of account storage trie. -/// See [`storage_root`] for more info. 
-pub fn storage_root_unsorted(storage: impl IntoIterator) -> B256 { - storage_root(storage.into_iter().sorted_by_key(|(key, _)| *key)) -} - -/// Calculates the root hash of account storage trie. -/// -/// # Panics -/// -/// If the items are not in sorted order. -pub fn storage_root(storage: impl IntoIterator) -> B256 { - let mut hb = HashBuilder::default(); - for (hashed_slot, value) in storage { - hb.add_leaf(Nibbles::unpack(hashed_slot), alloy_rlp::encode_fixed_size(&value).as_ref()); - } - hb.root() -} - -/// Implementation of hasher using our keccak256 hashing function -/// for compatibility with `triehash` crate. -#[cfg(any(test, feature = "test-utils"))] -pub mod triehash { - use super::{keccak256, B256}; - use hash_db::Hasher; - use plain_hasher::PlainHasher; - - /// A [Hasher] that calculates a keccak256 hash of the given data. - #[derive(Default, Debug, Clone, PartialEq, Eq)] - #[non_exhaustive] - pub struct KeccakHasher; - - #[cfg(any(test, feature = "test-utils"))] - impl Hasher for KeccakHasher { - type Out = B256; - type StdHasher = PlainHasher; - - const LENGTH: usize = 32; - - fn hash(x: &[u8]) -> Self::Out { - keccak256(x) - } - } -} - #[cfg(test)] mod tests { use super::*; - use crate::{ - bloom, constants::EMPTY_ROOT_HASH, hex_literal::hex, Block, GenesisAccount, Log, TxType, - GOERLI, HOLESKY, MAINNET, SEPOLIA, - }; - use alloy_primitives::{b256, LogData}; + use crate::{bloom, constants::EMPTY_ROOT_HASH, hex_literal::hex, Block, Log, TxType, U256}; + use alloy_genesis::GenesisAccount; + use alloy_primitives::{b256, Address, LogData}; use alloy_rlp::Decodable; + use reth_chainspec::{HOLESKY, MAINNET, SEPOLIA}; + use reth_trie_common::root::{state_root_ref_unhashed, state_root_unhashed}; use std::collections::HashMap; #[test] @@ -671,14 +535,6 @@ mod tests { "mainnet state root mismatch" ); - let expected_goerli_state_root = - b256!("5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008"); - let calculated_goerli_state_root = 
state_root_ref_unhashed(&GOERLI.genesis.alloc); - assert_eq!( - expected_goerli_state_root, calculated_goerli_state_root, - "goerli state root mismatch" - ); - let expected_sepolia_state_root = b256!("5eb6e371a698b8d68f665192350ffcecbbbf322916f4b51bd79bb6887da3f494"); let calculated_sepolia_state_root = state_root_ref_unhashed(&SEPOLIA.genesis.alloc); diff --git a/crates/primitives/src/proofs/traits.rs b/crates/primitives/src/proofs/traits.rs deleted file mode 100644 index 7fef86944b7b..000000000000 --- a/crates/primitives/src/proofs/traits.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::Account; -use alloy_consensus::constants::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; -use alloy_genesis::GenesisAccount; -use alloy_primitives::{keccak256, B256, U256}; -use reth_trie_types::TrieAccount; -use revm_primitives::AccountInfo; - -/// Converts a type into a [`TrieAccount`]. -pub trait IntoTrieAccount { - /// Converts to this type into a [`TrieAccount`]. - fn to_trie_account(self) -> TrieAccount; -} - -impl IntoTrieAccount for GenesisAccount { - fn to_trie_account(self) -> TrieAccount { - let storage_root = self - .storage - .map(|storage| { - super::storage_root_unhashed( - storage - .into_iter() - .filter(|(_, value)| *value != B256::ZERO) - .map(|(slot, value)| (slot, U256::from_be_bytes(*value))), - ) - }) - .unwrap_or(EMPTY_ROOT_HASH); - - TrieAccount { - nonce: self.nonce.unwrap_or_default(), - balance: self.balance, - storage_root, - code_hash: self.code.map_or(KECCAK_EMPTY, keccak256), - } - } -} - -impl IntoTrieAccount for (Account, B256) { - fn to_trie_account(self) -> TrieAccount { - let (account, storage_root) = self; - TrieAccount { - nonce: account.nonce, - balance: account.balance, - storage_root, - code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), - } - } -} - -impl IntoTrieAccount for (AccountInfo, B256) { - fn to_trie_account(self) -> TrieAccount { - let (account, storage_root) = self; - TrieAccount { - nonce: account.nonce, - balance: account.balance, - 
storage_root, - code_hash: account.code_hash, - } - } -} diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 4f07289dc729..9144c3850563 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -4,15 +4,15 @@ use crate::{logs_bloom, Bloom, Bytes, TxType, B256}; use alloy_primitives::Log; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; -#[cfg(any(test, feature = "arbitrary"))] -use proptest::strategy::Strategy; +use core::{cmp::Ordering, ops::Deref}; +use derive_more::{Deref, DerefMut, From, IntoIterator}; #[cfg(feature = "zstd-codec")] use reth_codecs::CompactZstd; use reth_codecs::{add_arbitrary_tests, main_codec, Compact}; -use std::{ - cmp::Ordering, - ops::{Deref, DerefMut}, -}; +use serde::{Deserialize, Serialize}; + +#[cfg(not(feature = "std"))] +use alloc::{vec, vec::Vec}; /// Receipt containing result of transaction execution. #[cfg_attr(feature = "zstd-codec", main_codec(no_arbitrary, zstd))] @@ -65,7 +65,19 @@ impl Receipt { } /// A collection of receipts organized as a two-dimensional vector. -#[derive(Clone, Debug, PartialEq, Eq, Default)] +#[derive( + Clone, + Debug, + PartialEq, + Eq, + Default, + Serialize, + Deserialize, + From, + Deref, + DerefMut, + IntoIterator, +)] pub struct Receipts { /// A two-dimensional vector of optional `Receipt` instances. 
pub receipt_vec: Vec>>, @@ -99,7 +111,7 @@ impl Receipts { pub fn optimism_root_slow( &self, index: usize, - chain_spec: &crate::ChainSpec, + chain_spec: &reth_chainspec::ChainSpec, timestamp: u64, ) -> Option { Some(crate::proofs::calculate_receipt_root_no_memo_optimism( @@ -110,41 +122,12 @@ impl Receipts { } } -impl From>>> for Receipts { - fn from(receipt_vec: Vec>>) -> Self { - Self { receipt_vec } - } -} - impl From> for Receipts { fn from(block_receipts: Vec) -> Self { Self { receipt_vec: vec![block_receipts.into_iter().map(Option::Some).collect()] } } } -impl Deref for Receipts { - type Target = Vec>>; - - fn deref(&self) -> &Self::Target { - &self.receipt_vec - } -} - -impl DerefMut for Receipts { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receipt_vec - } -} - -impl IntoIterator for Receipts { - type Item = Vec>; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.receipt_vec.into_iter() - } -} - impl FromIterator>> for Receipts { fn from_iter>>>(iter: I) -> Self { iter.into_iter().collect::>().into() @@ -201,50 +184,6 @@ pub fn gas_spent_by_transactions>( .collect() } -#[cfg(any(test, feature = "arbitrary"))] -impl proptest::arbitrary::Arbitrary for Receipt { - type Parameters = (); - - fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - use proptest::prelude::{any, prop_compose}; - - prop_compose! 
{ - fn arbitrary_receipt()(tx_type in any::(), - success in any::(), - cumulative_gas_used in any::(), - logs in proptest::collection::vec(proptest::arbitrary::any::(), 0..=20), - _deposit_nonce in any::>(), - _deposit_receipt_version in any::>()) -> Receipt - { - // Only receipts for deposit transactions may contain a deposit nonce - #[cfg(feature = "optimism")] - let (deposit_nonce, deposit_receipt_version) = if tx_type == TxType::Deposit { - // The deposit receipt version is only present if the deposit nonce is present - let deposit_receipt_version = _deposit_nonce.and(_deposit_receipt_version); - (_deposit_nonce, deposit_receipt_version) - } else { - (None, None) - }; - - Receipt { tx_type, - success, - cumulative_gas_used, - logs, - // Only receipts for deposit transactions may contain a deposit nonce - #[cfg(feature = "optimism")] - deposit_nonce, - // Only receipts for deposit transactions may contain a deposit nonce - #[cfg(feature = "optimism")] - deposit_receipt_version - } - } - } - arbitrary_receipt().boxed() - } - - type Strategy = proptest::strategy::BoxedStrategy; -} - #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Receipt { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { diff --git a/crates/primitives/src/revm/compat.rs b/crates/primitives/src/revm/compat.rs deleted file mode 100644 index 705fc188065c..000000000000 --- a/crates/primitives/src/revm/compat.rs +++ /dev/null @@ -1,40 +0,0 @@ -use crate::{revm_primitives::AccountInfo, Account, Address, TxKind, KECCAK_EMPTY, U256}; -use revm::{interpreter::gas::validate_initial_tx_gas, primitives::SpecId}; - -/// Converts a Revm [`AccountInfo`] into a Reth [`Account`]. -/// -/// Sets `bytecode_hash` to `None` if `code_hash` is [`KECCAK_EMPTY`]. 
-pub fn into_reth_acc(revm_acc: AccountInfo) -> Account { - let code_hash = revm_acc.code_hash; - Account { - balance: revm_acc.balance, - nonce: revm_acc.nonce, - bytecode_hash: (code_hash != KECCAK_EMPTY).then_some(code_hash), - } -} - -/// Converts a Revm [`AccountInfo`] into a Reth [`Account`]. -/// -/// Sets `code_hash` to [`KECCAK_EMPTY`] if `bytecode_hash` is `None`. -pub fn into_revm_acc(reth_acc: Account) -> AccountInfo { - AccountInfo { - balance: reth_acc.balance, - nonce: reth_acc.nonce, - code_hash: reth_acc.bytecode_hash.unwrap_or(KECCAK_EMPTY), - code: None, - } -} - -/// Calculates the Intrinsic Gas usage for a Transaction -/// -/// Caution: This only checks past the Merge hardfork. -#[inline] -pub fn calculate_intrinsic_gas_after_merge( - input: &[u8], - kind: &TxKind, - access_list: &[(Address, Vec)], - is_shanghai: bool, -) -> u64 { - let spec_id = if is_shanghai { SpecId::SHANGHAI } else { SpecId::MERGE }; - validate_initial_tx_gas(spec_id, input, kind.is_create(), access_list) -} diff --git a/crates/primitives/src/revm/mod.rs b/crates/primitives/src/revm/mod.rs deleted file mode 100644 index f3c4ac62d9f1..000000000000 --- a/crates/primitives/src/revm/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Helpers for working with revm. - -/// The `compat` module contains utility functions that perform conversions between reth and revm, -/// compare analogous types from the two implementations, and calculate intrinsic gas usage. -/// -/// The included conversion methods can be used to convert between: -/// * reth's [Log](crate::Log) type and revm's [Log](revm_primitives::Log) type. -/// * reth's [Account](crate::Account) type and revm's [`AccountInfo`](revm_primitives::AccountInfo) -/// type. -pub mod compat; - -/// Reth block execution/validation configuration and constants -pub mod config; - -/// The `env` module provides utility methods for filling revm transaction and block environments. 
-/// -/// It includes functions to fill transaction and block environments with relevant data, prepare -/// the block and transaction environments for system contract calls, and recover the signer from -/// Clique-formatted extra data in ethereum headers. -pub mod env; diff --git a/crates/primitives/src/transaction/access_list.rs b/crates/primitives/src/transaction/access_list.rs index acaf132c479a..22e113fbdc11 100644 --- a/crates/primitives/src/transaction/access_list.rs +++ b/crates/primitives/src/transaction/access_list.rs @@ -10,6 +10,7 @@ mod tests { use crate::{Address, B256}; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; use proptest::proptest; + use proptest_arbitrary_interop::arb; use reth_codecs::{main_codec, Compact}; /// This type is kept for compatibility tests after the codec support was added to alloy-eips @@ -18,12 +19,7 @@ mod tests { #[derive( Clone, Debug, PartialEq, Eq, Hash, Default, RlpDecodableWrapper, RlpEncodableWrapper, )] - struct RethAccessList( - #[proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" - )] - Vec, - ); + struct RethAccessList(Vec); impl PartialEq for RethAccessList { fn eq(&self, other: &AccessList) -> bool { @@ -41,9 +37,6 @@ mod tests { /// The storage keys to be loaded at the start of execution. /// /// Each key is a 32-byte value representing a specific storage slot. 
- #[proptest( - strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" - )] storage_keys: Vec, } @@ -55,7 +48,7 @@ mod tests { proptest!( #[test] - fn test_roundtrip_accesslist_compat(access_list: RethAccessList) { + fn test_roundtrip_accesslist_compat(access_list in arb::()) { // Convert access_list to buffer and then create alloy_access_list from buffer and // compare let mut compacted_reth_access_list = Vec::::new(); diff --git a/crates/primitives/src/transaction/compat.rs b/crates/primitives/src/transaction/compat.rs new file mode 100644 index 000000000000..3dd8acf8683f --- /dev/null +++ b/crates/primitives/src/transaction/compat.rs @@ -0,0 +1,104 @@ +use crate::{Address, Transaction, TransactionSigned, TxKind, U256}; +use revm_primitives::TxEnv; + +/// Implements behaviour to fill a [`TxEnv`] from another transaction. +pub trait FillTxEnv { + /// Fills [`TxEnv`] with an [`Address`] and transaction. + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); +} + +impl FillTxEnv for TransactionSigned { + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + #[cfg(feature = "optimism")] + let envelope = { + let mut envelope = Vec::with_capacity(self.length_without_header()); + self.encode_enveloped(&mut envelope); + envelope + }; + + tx_env.caller = sender; + match self.as_ref() { + Transaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + } + Transaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + 
tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list = tx.access_list.0.clone(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + } + Transaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list = tx.access_list.0.clone(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + } + Transaction::Eip4844(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = TxKind::Call(tx.to); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list = tx.access_list.0.clone(); + tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); + tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); + } + #[cfg(feature = "optimism")] + Transaction::Deposit(tx) => { + tx_env.access_list.clear(); + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::ZERO; + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = None; + tx_env.nonce = None; + tx_env.optimism = revm_primitives::OptimismFields { + source_hash: Some(tx.source_hash), + mint: tx.mint, + is_system_transaction: Some(tx.is_system_transaction), + enveloped_tx: Some(envelope.into()), + }; + return; + } + } + + #[cfg(feature = "optimism")] + if !self.is_deposit() { + tx_env.optimism = revm_primitives::OptimismFields { + source_hash: None, + mint: None, + is_system_transaction: Some(false), + enveloped_tx: 
Some(envelope.into()), + } + } + } +} diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 92f75db6ab11..cce6f0ca22fc 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -1,9 +1,8 @@ use super::access_list::AccessList; use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; -use bytes::BytesMut; +use core::mem; use reth_codecs::{main_codec, Compact}; -use std::mem; /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). #[main_codec] @@ -216,7 +215,7 @@ impl TxEip1559 { /// Outputs the signature hash of the transaction by first encoding without a signature, then /// hashing. pub(crate) fn signature_hash(&self) -> B256 { - let mut buf = BytesMut::with_capacity(self.payload_len_for_signature()); + let mut buf = Vec::with_capacity(self.payload_len_for_signature()); self.encode_for_signing(&mut buf); keccak256(&buf) } diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index 9dc46188675d..ebaa12785c1d 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -1,9 +1,8 @@ use super::access_list::AccessList; use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; -use bytes::BytesMut; +use core::mem; use reth_codecs::{main_codec, Compact}; -use std::mem; /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). #[main_codec] @@ -179,7 +178,7 @@ impl TxEip2930 { /// Outputs the signature hash of the transaction by first encoding without a signature, then /// hashing. 
pub(crate) fn signature_hash(&self) -> B256 { - let mut buf = BytesMut::with_capacity(self.payload_len_for_signature()); + let mut buf = Vec::with_capacity(self.payload_len_for_signature()); self.encode_for_signing(&mut buf); keccak256(&buf) } diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 214e2a5e1e52..f792d787afdd 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -4,12 +4,15 @@ use crate::{ B256, U256, }; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; +use core::mem; use reth_codecs::{main_codec, Compact, CompactPlaceholder}; -use std::mem; #[cfg(feature = "c-kzg")] use crate::kzg::KzgSettings; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + /// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) /// /// A transaction with blob hashes and max blob fee @@ -145,20 +148,28 @@ impl TxEip4844 { /// - `max_fee_per_blob_gas` /// - `blob_versioned_hashes` pub fn decode_inner(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Self { + let mut tx = Self { chain_id: Decodable::decode(buf)?, nonce: Decodable::decode(buf)?, max_priority_fee_per_gas: Decodable::decode(buf)?, max_fee_per_gas: Decodable::decode(buf)?, gas_limit: Decodable::decode(buf)?, - placeholder: Some(()), + placeholder: None, to: Decodable::decode(buf)?, value: Decodable::decode(buf)?, input: Decodable::decode(buf)?, access_list: Decodable::decode(buf)?, max_fee_per_blob_gas: Decodable::decode(buf)?, blob_versioned_hashes: Decodable::decode(buf)?, - }) + }; + + // HACK: our arbitrary implementation sets the placeholder this way for backwards + // compatibility, and should be removed once `placeholder` can be removed + if tx.to != Address::default() { + tx.placeholder = Some(()) + } + + Ok(tx) } /// Outputs the length of the transaction's fields, without a RLP header. 
diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives/src/transaction/error.rs index 2b17fa7181a8..c5199dda5d9a 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives/src/transaction/error.rs @@ -2,7 +2,7 @@ use crate::{GotExpectedBoxed, U256}; /// Represents error variants that can happen when trying to validate a /// [Transaction](crate::Transaction) -#[derive(Debug, Clone, Eq, PartialEq, thiserror::Error)] +#[derive(Debug, Clone, Eq, PartialEq, thiserror_no_std::Error)] pub enum InvalidTransactionError { /// The sender does not have enough funds to cover the transaction fees #[error( @@ -55,7 +55,7 @@ pub enum InvalidTransactionError { /// Represents error variants that can happen when trying to convert a transaction to /// [`PooledTransactionsElement`](crate::PooledTransactionsElement) -#[derive(Debug, Clone, Eq, PartialEq, thiserror::Error)] +#[derive(Debug, Clone, Eq, PartialEq, thiserror_no_std::Error)] pub enum TransactionConversionError { /// This error variant is used when a transaction cannot be converted into a /// [`PooledTransactionsElement`](crate::PooledTransactionsElement) because it is not supported @@ -66,7 +66,7 @@ pub enum TransactionConversionError { /// Represents error variants than can happen when trying to convert a /// [`TransactionSignedEcRecovered`](crate::TransactionSignedEcRecovered) transaction. -#[derive(Debug, Clone, Eq, PartialEq, thiserror::Error)] +#[derive(Debug, Clone, Eq, PartialEq, thiserror_no_std::Error)] pub enum TryFromRecoveredTransactionError { /// Thrown if the transaction type is unsupported. 
#[error("Unsupported transaction type: {0}")] diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index d6cb4ae2ab1a..09b661cf7995 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -1,8 +1,7 @@ use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Encodable, Header}; -use bytes::BytesMut; +use core::mem; use reth_codecs::{main_codec, Compact}; -use std::mem; /// Legacy transaction. #[main_codec] @@ -163,7 +162,7 @@ impl TxLegacy { /// /// See [`Self::encode_for_signing`] for more information on the encoding format. pub(crate) fn signature_hash(&self) -> B256 { - let mut buf = BytesMut::with_capacity(self.payload_len_for_signature()); + let mut buf = Vec::with_capacity(self.payload_len_for_signature()); self.encode_for_signing(&mut buf); keccak256(&buf) } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index c712ac2f38bf..4e455014ff4f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -8,12 +8,12 @@ use alloy_rlp::{ Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, }; use bytes::Buf; +use core::mem; use derive_more::{AsRef, Deref}; use once_cell::sync::Lazy; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; use serde::{Deserialize, Serialize}; -use std::mem; pub use access_list::{AccessList, AccessListItem}; pub use eip1559::TxEip1559; @@ -32,6 +32,7 @@ pub use sidecar::generate_blob_sidecar; pub use sidecar::BlobTransactionValidationError; pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; +pub use compat::FillTxEnv; pub use signature::{extract_chain_id, Signature}; pub use tx_type::{ TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, @@ 
-39,6 +40,7 @@ pub use tx_type::{ pub use variant::TransactionSignedVariant; mod access_list; +mod compat; mod eip1559; mod eip2930; mod eip4844; @@ -60,6 +62,9 @@ pub use optimism::TxDeposit; #[cfg(feature = "optimism")] pub use tx_type::DEPOSIT_TX_TYPE_ID; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + /// Either a transaction hash or number. pub type TxHashOrNumber = BlockHashOrNumber; @@ -72,18 +77,6 @@ pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: Lazy = _ => 5, }); -/// Minimum length of a rlp-encoded legacy transaction. -pub const MIN_LENGTH_LEGACY_TX_ENCODED: usize = 10; -/// Minimum length of a rlp-encoded eip2930 transaction. -pub const MIN_LENGTH_EIP2930_TX_ENCODED: usize = 14; -/// Minimum length of a rlp-encoded eip1559 transaction. -pub const MIN_LENGTH_EIP1559_TX_ENCODED: usize = 15; -/// Minimum length of a rlp-encoded eip4844 transaction. -pub const MIN_LENGTH_EIP4844_TX_ENCODED: usize = 37; -/// Minimum length of a rlp-encoded deposit transaction. -#[cfg(feature = "optimism")] -pub const MIN_LENGTH_DEPOSIT_TX_ENCODED: usize = 65; - /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). @@ -478,6 +471,18 @@ impl Transaction { } } + /// This sets the transaction's gas limit. + pub fn set_gas_limit(&mut self, gas_limit: u64) { + match self { + Self::Legacy(tx) => tx.gas_limit = gas_limit, + Self::Eip2930(tx) => tx.gas_limit = gas_limit, + Self::Eip1559(tx) => tx.gas_limit = gas_limit, + Self::Eip4844(tx) => tx.gas_limit = gas_limit, + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.gas_limit = gas_limit, + } + } + /// This sets the transaction's nonce. 
pub fn set_nonce(&mut self, nonce: u64) { match self { @@ -845,10 +850,11 @@ impl Compact for TransactionSignedNoHash { let tx_bits = if zstd_bit { TRANSACTION_COMPRESSOR.with(|compressor| { let mut compressor = compressor.borrow_mut(); - let mut tmp = bytes::BytesMut::with_capacity(200); + let mut tmp = Vec::with_capacity(256); let tx_bits = self.transaction.to_compact(&mut tmp); buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); + tx_bits as u8 }) } else { @@ -1448,40 +1454,6 @@ impl Decodable for TransactionSigned { } } -#[cfg(any(test, feature = "arbitrary"))] -impl proptest::arbitrary::Arbitrary for TransactionSigned { - type Parameters = (); - fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - use proptest::prelude::{any, Strategy}; - - any::<(Transaction, Signature)>() - .prop_map(move |(mut transaction, sig)| { - if let Some(chain_id) = transaction.chain_id() { - // Otherwise we might overflow when calculating `v` on `recalculate_hash` - transaction.set_chain_id(chain_id % (u64::MAX / 2 - 36)); - } - - #[cfg(feature = "optimism")] - let sig = transaction - .is_deposit() - .then(Signature::optimism_deposit_tx_signature) - .unwrap_or(sig); - - if let Transaction::Eip4844(ref mut tx_eip_4844) = transaction { - tx_eip_4844.placeholder = - if tx_eip_4844.to != Address::default() { Some(()) } else { None }; - } - - let mut tx = Self { hash: Default::default(), signature: sig, transaction }; - tx.hash = tx.recalculate_hash(); - tx - }) - .boxed() - } - - type Strategy = proptest::strategy::BoxedStrategy; -} - #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -1491,6 +1463,21 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { transaction.set_chain_id(chain_id % (u64::MAX / 2 - 36)); } + if let Transaction::Eip4844(ref mut tx_eip_4844) = transaction { + tx_eip_4844.placeholder = + if tx_eip_4844.to != 
Address::default() { Some(()) } else { None }; + } + + #[cfg(feature = "optimism")] + // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces + // ambiguity in roundtrip tests. Patch the mint value of deposit transaction here, so that + // it's `None` if zero. + if let Transaction::Deposit(ref mut tx_deposit) = transaction { + if tx_deposit.mint == Some(0) { + tx_deposit.mint = None; + } + } + let signature = Signature::arbitrary(u)?; #[cfg(feature = "optimism")] @@ -1630,15 +1617,14 @@ mod tests { hex, sign_message, transaction::{ from_compact_zstd_unaware, signature::Signature, to_compact_ztd_unaware, TxEip1559, - TxKind, TxLegacy, MIN_LENGTH_EIP1559_TX_ENCODED, MIN_LENGTH_EIP2930_TX_ENCODED, - MIN_LENGTH_EIP4844_TX_ENCODED, MIN_LENGTH_LEGACY_TX_ENCODED, - PARALLEL_SENDER_RECOVERY_THRESHOLD, + TxKind, TxLegacy, PARALLEL_SENDER_RECOVERY_THRESHOLD, }, Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, - TransactionSignedNoHash, TxEip2930, TxEip4844, B256, U256, + TransactionSignedNoHash, B256, U256, }; use alloy_primitives::{address, b256, bytes}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; + use proptest_arbitrary_interop::arb; use reth_codecs::Compact; use secp256k1::{Keypair, Secp256k1}; use std::str::FromStr; @@ -1914,7 +1900,7 @@ mod tests { #![proptest_config(proptest::prelude::ProptestConfig::with_cases(1))] #[test] - fn test_parallel_recovery_order(txes in proptest::collection::vec(proptest::prelude::any::(), *PARALLEL_SENDER_RECOVERY_THRESHOLD * 5)) { + fn test_parallel_recovery_order(txes in proptest::collection::vec(arb::(), *PARALLEL_SENDER_RECOVERY_THRESHOLD * 5)) { let mut rng =rand::thread_rng(); let secp = Secp256k1::new(); let txes: Vec = txes.into_iter().map(|mut tx| { @@ -1975,106 +1961,6 @@ mod tests { assert_eq!(sender, address!("7e9e359edf0dbacf96a9952fa63092d919b0842b")); } - #[test] - fn min_length_encoded_legacy_transaction() { - let transaction = TxLegacy::default(); - 
let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Legacy(transaction), - signature, - ); - - let encoded = &alloy_rlp::encode(signed_tx); - assert_eq!( - if cfg!(feature = "optimism") { - hex!("c9808080808080808080") - } else { - hex!("c98080808080801b8080") - }, - &encoded[..] - ); - assert_eq!(MIN_LENGTH_LEGACY_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - - #[test] - fn min_length_encoded_eip2930_transaction() { - let transaction = TxEip2930::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip2930(transaction), - signature, - ); - - let encoded = &alloy_rlp::encode(signed_tx); - assert_eq!(hex!("8d01cb80808080808080c0808080"), encoded[..]); - assert_eq!(MIN_LENGTH_EIP2930_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - - #[test] - fn min_length_encoded_eip1559_transaction() { - let transaction = TxEip1559::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(transaction), - signature, - ); - - let encoded = &alloy_rlp::encode(signed_tx); - assert_eq!(hex!("8e02cc8080808080808080c0808080"), encoded[..]); - assert_eq!(MIN_LENGTH_EIP1559_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - - #[test] - fn min_length_encoded_eip4844_transaction() { - let transaction = TxEip4844::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip4844(transaction), - signature, - ); - - let encoded = alloy_rlp::encode(signed_tx); - assert_eq!( - hex!("a403e280808080809400000000000000000000000000000000000000008080c080c0808080"), - encoded[..] 
- ); - assert_eq!(MIN_LENGTH_EIP4844_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - - #[cfg(feature = "optimism")] - #[test] - fn min_length_encoded_deposit_transaction() { - use super::MIN_LENGTH_DEPOSIT_TX_ENCODED; - use crate::TxDeposit; - - let transaction = TxDeposit::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(transaction), - signature, - ); - - let encoded = &alloy_rlp::encode(signed_tx); - - assert_eq!(b"\xb8?~\xf8<\xa0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x94\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\x80\x80\x80\x80\x80", &encoded[..]); - assert_eq!(MIN_LENGTH_DEPOSIT_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - #[test] fn transaction_signed_no_hash_zstd_codec() { // will use same signature everywhere. diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index bca88e36d41b..87bed1d67415 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -13,6 +13,9 @@ use derive_more::{AsRef, Deref}; use reth_codecs::add_arbitrary_tests; use serde::{Deserialize, Serialize}; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + /// A response to `GetPooledTransactions`. This can include either a blob transaction, or a /// non-4844 signed transaction. 
#[add_arbitrary_tests] @@ -612,33 +615,6 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { } } -#[cfg(any(test, feature = "arbitrary"))] -impl proptest::arbitrary::Arbitrary for PooledTransactionsElement { - type Parameters = (); - fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - use proptest::prelude::{any, Strategy}; - - any::<(TransactionSigned, crate::BlobTransactionSidecar)>() - .prop_map(move |(transaction, sidecar)| { - match Self::try_from(transaction) { - Ok(Self::BlobTransaction(mut tx)) => { - tx.sidecar = sidecar; - Self::BlobTransaction(tx) - } - Ok(tx) => tx, - Err(_) => Self::Eip1559 { - transaction: Default::default(), - signature: Default::default(), - hash: Default::default(), - }, // Gen an Eip1559 as arbitrary for testing purpose - } - }) - .boxed() - } - - type Strategy = proptest::strategy::BoxedStrategy; -} - /// A signed pooled transaction with recovered signer. #[derive(Debug, Clone, PartialEq, Eq, AsRef, Deref)] pub struct PooledTransactionsElementEcRecovered { diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index b141889c1950..8e6f210bf910 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -12,6 +12,9 @@ pub use alloy_eips::eip4844::BlobTransactionSidecar; #[cfg(feature = "c-kzg")] pub use alloy_eips::eip4844::BlobTransactionValidationError; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. /// @@ -278,14 +281,16 @@ impl BlobTransaction { /// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. 
#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { - use crate::constants::eip4844::MAINNET_KZG_TRUSTED_SETUP; + use alloy_eips::eip4844::env_settings::EnvKzgSettings; use c_kzg::{KzgCommitment, KzgProof}; - let kzg_settings = MAINNET_KZG_TRUSTED_SETUP.clone(); + let kzg_settings = EnvKzgSettings::Default; let commitments: Vec = blobs .iter() - .map(|blob| KzgCommitment::blob_to_kzg_commitment(&blob.clone(), &kzg_settings).unwrap()) + .map(|blob| { + KzgCommitment::blob_to_kzg_commitment(&blob.clone(), kzg_settings.get()).unwrap() + }) .map(|commitment| commitment.to_bytes()) .collect(); @@ -293,7 +298,7 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar .iter() .zip(commitments.iter()) .map(|(blob, commitment)| { - KzgProof::compute_blob_kzg_proof(blob, commitment, &kzg_settings).unwrap() + KzgProof::compute_blob_kzg_proof(blob, commitment, kzg_settings.get()).unwrap() }) .map(|proof| proof.to_bytes()) .collect(); diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 14a1f7de8c0f..3176f8934561 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -193,7 +193,7 @@ impl Signature { /// Calculates a heuristic for the in-memory size of the [Signature]. #[inline] pub const fn size(&self) -> usize { - std::mem::size_of::() + core::mem::size_of::() } } diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs index b3f7a00be922..5bff5215d7ac 100644 --- a/crates/primitives/src/transaction/variant.rs +++ b/crates/primitives/src/transaction/variant.rs @@ -5,7 +5,7 @@ use crate::{ Address, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, B256, }; -use std::ops::Deref; +use core::ops::Deref; /// Represents various different transaction formats used in reth. 
/// diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 016265e1b5c6..2f2a37d5ba66 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-exex-types.workspace = true -reth-primitives.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-errors.workspace = true @@ -22,6 +22,7 @@ reth-provider.workspace = true reth-tokio-util.workspace = true reth-config.workspace = true reth-prune-types.workspace = true +reth-static-file-types.workspace = true # metrics reth-metrics.workspace = true @@ -34,6 +35,7 @@ thiserror.workspace = true itertools.workspace = true rayon.workspace = true tokio.workspace = true +rustc-hash.workspace = true [dev-dependencies] # reth diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index a91a0faa6d51..233e80a71a9a 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -1,8 +1,8 @@ use crate::{segments::SegmentSet, Pruner}; +use reth_chainspec::MAINNET; use reth_config::PruneConfig; use reth_db_api::database::Database; use reth_exex_types::FinishedExExHeight; -use reth_primitives::MAINNET; use reth_provider::ProviderFactory; use reth_prune_types::PruneModes; use std::time::Duration; diff --git a/crates/prune/prune/src/event.rs b/crates/prune/prune/src/event.rs index 7007e3f47568..95a90d7628cc 100644 --- a/crates/prune/prune/src/event.rs +++ b/crates/prune/prune/src/event.rs @@ -1,6 +1,6 @@ use alloy_primitives::BlockNumber; use reth_prune_types::{PruneProgress, PruneSegment}; -use std::{collections::BTreeMap, time::Duration}; +use std::time::Duration; /// An event emitted by a [Pruner][crate::Pruner]. 
#[derive(Debug, PartialEq, Eq, Clone)]
@@ -11,6 +11,6 @@ pub enum PrunerEvent {
     Finished {
         tip_block_number: BlockNumber,
         elapsed: Duration,
-        stats: BTreeMap,
+        stats: Vec<(PruneSegment, usize, PruneProgress)>,
     },
 }
diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs
index f889da41d923..65ab77b3c39e 100644
--- a/crates/prune/prune/src/pruner.rs
+++ b/crates/prune/prune/src/pruner.rs
@@ -5,18 +5,16 @@ use crate::{
     segments::{PruneInput, Segment},
     Metrics, PrunerError, PrunerEvent,
 };
+use alloy_primitives::BlockNumber;
 use reth_db_api::database::Database;
 use reth_exex_types::FinishedExExHeight;
-use reth_primitives::{BlockNumber, StaticFileSegment};
 use reth_provider::{
     DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, StaticFileProviderFactory,
 };
 use reth_prune_types::{PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment};
+use reth_static_file_types::StaticFileSegment;
 use reth_tokio_util::{EventSender, EventStream};
-use std::{
-    collections::BTreeMap,
-    time::{Duration, Instant},
-};
+use std::time::{Duration, Instant};
 use tokio::sync::watch;
 use tracing::debug;
@@ -26,7 +24,7 @@ pub type PrunerResult = Result;
 /// The pruner type itself with the result of [`Pruner::run`]
 pub type PrunerWithResult = (Pruner, PrunerResult);
-type PrunerStats = BTreeMap;
+type PrunerStats = Vec<(PruneSegment, usize, PruneProgress)>;
 /// Pruning routine. Main pruning logic happens in [`Pruner::run`].
 #[derive(Debug)]
@@ -84,7 +82,11 @@ impl Pruner {
         self.event_sender.new_listener()
     }
-    /// Run the pruner
+    /// Run the pruner. This will only prune data up to the highest finished `ExEx` height or, if
+    /// there are no `ExEx`s, up to the provided tip block number.
+    ///
+    /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data
pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult { let Some(tip_block_number) = self.adjust_tip_block_number_to_finished_exex_height(tip_block_number) @@ -236,7 +238,7 @@ impl Pruner { if output.pruned > 0 { limiter.increment_deleted_entries_count_by(output.pruned); pruned += output.pruned; - stats.insert(segment.segment(), (output.progress, output.pruned)); + stats.push((segment.segment(), output.pruned, output.progress)); } } else { debug!(target: "pruner", segment = ?segment.segment(), ?purpose, "Nothing to prune for the segment"); @@ -305,8 +307,8 @@ impl Pruner { /// Adjusts the tip block number to the finished `ExEx` height. This is needed to not prune more /// data than `ExExs` have processed. Depending on the height: - /// - [`FinishedExExHeight::NoExExs`] returns the tip block number as is as no adjustment for - /// `ExExs` is needed. + /// - [`FinishedExExHeight::NoExExs`] returns the tip block number as no adjustment for `ExExs` + /// is needed. /// - [`FinishedExExHeight::NotReady`] returns `None` as not all `ExExs` have emitted a /// `FinishedHeight` event yet. /// - [`FinishedExExHeight::Height`] returns the finished `ExEx` height. 
@@ -332,9 +334,9 @@ impl Pruner {
 mod tests {
     use crate::Pruner;
+    use reth_chainspec::MAINNET;
     use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir};
     use reth_exex_types::FinishedExExHeight;
-    use reth_primitives::MAINNET;
     use reth_provider::{providers::StaticFileProvider, ProviderFactory};
     #[test]
diff --git a/crates/prune/prune/src/segments/account_history.rs b/crates/prune/prune/src/segments/account_history.rs
index 34d63096785c..90845e859a03 100644
--- a/crates/prune/prune/src/segments/account_history.rs
+++ b/crates/prune/prune/src/segments/account_history.rs
@@ -4,10 +4,12 @@ use crate::{
     },
     PrunerError,
 };
+use itertools::Itertools;
 use reth_db::tables;
 use reth_db_api::{database::Database, models::ShardedKey};
 use reth_provider::DatabaseProviderRW;
 use reth_prune_types::{PruneInterruptReason, PruneMode, PruneProgress, PruneSegment};
+use rustc_hash::FxHashMap;
 use tracing::{instrument, trace};
 /// Number of account history tables to prune in one step.
@@ -64,34 +66,53 @@ impl Segment for AccountHistory {
         }
         let mut last_changeset_pruned_block = None;
+        // Deleted account changeset keys (account addresses) with the highest block number deleted
+        // for that key.
+        //
+        // The size of this map is limited by `prune_delete_limit * blocks_since_last_run /
+        // ACCOUNT_HISTORY_TABLES_TO_PRUNE`, and with current default it's usually `3500 * 5
+        // / 2`, so 8750 entries. Each entry is `160 bit + 256 bit + 64 bit`, so the total
+        // size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is
+        // additionally limited by the `max_reorg_depth`, so no OOM is expected here.
+ let mut highest_deleted_accounts = FxHashMap::default(); let (pruned_changesets, done) = provider .prune_table_with_range::( range, &mut limiter, |_| false, - |row| last_changeset_pruned_block = Some(row.0), + |(block_number, account)| { + highest_deleted_accounts.insert(account.address, block_number); + last_changeset_pruned_block = Some(block_number); + }, )?; trace!(target: "pruner", pruned = %pruned_changesets, %done, "Pruned account history (changesets)"); let last_changeset_pruned_block = last_changeset_pruned_block - // If there's more account account changesets to prune, set the checkpoint block number - // to previous, so we could finish pruning its account changesets on the next run. + // If there's more account changesets to prune, set the checkpoint block number to + // previous, so we could finish pruning its account changesets on the next run. .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) .unwrap_or(range_end); - let (processed, pruned_indices) = prune_history_indices::( + // Sort highest deleted block numbers by account address and turn them into sharded keys. + // We did not use `BTreeMap` from the beginning, because it's inefficient for hashes. 
+ let highest_sharded_keys = highest_deleted_accounts + .into_iter() + .sorted_unstable() // Unstable is fine because no equal keys exist in the map + .map(|(address, block_number)| { + ShardedKey::new(address, block_number.min(last_changeset_pruned_block)) + }); + let outcomes = prune_history_indices::( provider, - last_changeset_pruned_block, + highest_sharded_keys, |a, b| a.key == b.key, - |key| ShardedKey::last(key.key), )?; - trace!(target: "pruner", %processed, pruned = %pruned_indices, %done, "Pruned account history (history)"); + trace!(target: "pruner", ?outcomes, %done, "Pruned account history (indices)"); let progress = PruneProgress::new(done, &limiter); Ok(PruneOutput { progress, - pruned: pruned_changesets + pruned_indices, + pruned: pruned_changesets + outcomes.deleted, checkpoint: Some(PruneOutputCheckpoint { block_number: Some(last_changeset_pruned_block), tx_number: None, @@ -106,9 +127,9 @@ mod tests { account_history::ACCOUNT_HISTORY_TABLES_TO_PRUNE, AccountHistory, PruneInput, PruneOutput, Segment, }; + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; - use reth_primitives::{BlockNumber, B256}; use reth_provider::PruneCheckpointReader; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, diff --git a/crates/prune/prune/src/segments/headers.rs b/crates/prune/prune/src/segments/headers.rs index 0c679a7ccd85..57294c556ca2 100644 --- a/crates/prune/prune/src/segments/headers.rs +++ b/crates/prune/prune/src/segments/headers.rs @@ -188,10 +188,10 @@ where #[cfg(test)] mod tests { + use alloy_primitives::{BlockNumber, B256, U256}; use assert_matches::assert_matches; use reth_db::tables; use reth_db_api::transaction::DbTx; - use reth_primitives::{BlockNumber, B256, U256}; use reth_provider::PruneCheckpointReader; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, 
PruneSegment, diff --git a/crates/prune/prune/src/segments/history.rs b/crates/prune/prune/src/segments/history.rs index 253d453aa597..ff477a39f942 100644 --- a/crates/prune/prune/src/segments/history.rs +++ b/crates/prune/prune/src/segments/history.rs @@ -1,4 +1,5 @@ -use reth_db::BlockNumberList; +use alloy_primitives::BlockNumber; +use reth_db::{BlockNumberList, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, @@ -7,106 +8,153 @@ use reth_db_api::{ transaction::DbTxMut, DatabaseError, }; -use reth_primitives::BlockNumber; use reth_provider::DatabaseProviderRW; -/// Prune history indices up to the provided block, inclusive. +enum PruneShardOutcome { + Deleted, + Updated, + Unchanged, +} + +#[derive(Debug, Default)] +pub(crate) struct PrunedIndices { + pub(crate) deleted: usize, + pub(crate) updated: usize, + pub(crate) unchanged: usize, +} + +/// Prune history indices according to the provided list of highest sharded keys. /// -/// Returns total number of processed (walked) and deleted entities. +/// Returns total number of deleted, updated and unchanged entities. pub(crate) fn prune_history_indices( provider: &DatabaseProviderRW, - to_block: BlockNumber, + highest_sharded_keys: impl IntoIterator, key_matches: impl Fn(&T::Key, &T::Key) -> bool, - last_key: impl Fn(&T::Key) -> T::Key, -) -> Result<(usize, usize), DatabaseError> +) -> Result where DB: Database, T: Table, T::Key: AsRef>, { - let mut processed = 0; - let mut deleted = 0; - let mut cursor = provider.tx_ref().cursor_write::()?; + let mut outcomes = PrunedIndices::default(); + let mut cursor = provider.tx_ref().cursor_write::>()?; + + for sharded_key in highest_sharded_keys { + // Seek to the shard that has the key >= the given sharded key + // TODO: optimize + let mut shard = cursor.seek(RawKey::new(sharded_key.clone()))?; - // Prune history table: - // 1. 
If the shard has `highest_block_number` less than or equal to the target block number - // for pruning, delete the shard completely. - // 2. If the shard has `highest_block_number` greater than the target block number for - // pruning, filter block numbers inside the shard which are less than the target - // block number for pruning. - while let Some(result) = cursor.next()? { - let (key, blocks): (T::Key, BlockNumberList) = result; + // Get the highest block number that needs to be deleted for this sharded key + let to_block = sharded_key.as_ref().highest_block_number; - // If shard consists only of block numbers less than the target one, delete shard - // completely. - if key.as_ref().highest_block_number <= to_block { - cursor.delete_current()?; - deleted += 1; - if key.as_ref().highest_block_number == to_block { - // Shard contains only block numbers up to the target one, so we can skip to - // the last shard for this key. It is guaranteed that further shards for this - // sharded key will not contain the target block number, as it's in this shard. - cursor.seek_exact(last_key(&key))?; + 'shard: loop { + let Some((key, block_nums)) = + shard.map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v))).transpose()? + else { + break + }; + + if key_matches(&key, &sharded_key) { + match prune_shard(&mut cursor, key, block_nums, to_block, &key_matches)? { + PruneShardOutcome::Deleted => outcomes.deleted += 1, + PruneShardOutcome::Updated => outcomes.updated += 1, + PruneShardOutcome::Unchanged => outcomes.unchanged += 1, + } + } else { + // If such shard doesn't exist, skip to the next sharded key + break 'shard } + + shard = cursor.next()?; } - // Shard contains block numbers that are higher than the target one, so we need to - // filter it. It is guaranteed that further shards for this sharded key will not - // contain the target block number, as it's in this shard. 
- else { - let higher_blocks = - blocks.iter().skip_while(|block| *block <= to_block).collect::>(); + } - // If there were blocks less than or equal to the target one - // (so the shard has changed), update the shard. - if blocks.len() as usize != higher_blocks.len() { - // If there will be no more blocks in the shard after pruning blocks below target - // block, we need to remove it, as empty shards are not allowed. - if higher_blocks.is_empty() { - if key.as_ref().highest_block_number == u64::MAX { - let prev_row = cursor.prev()?; - match prev_row { - // If current shard is the last shard for the sharded key that - // has previous shards, replace it with the previous shard. - Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => { - cursor.delete_current()?; - deleted += 1; - // Upsert will replace the last shard for this sharded key with - // the previous value. - cursor.upsert(key.clone(), prev_value)?; - } - // If there's no previous shard for this sharded key, - // just delete last shard completely. - _ => { - // If we successfully moved the cursor to a previous row, - // jump to the original last shard. - if prev_row.is_some() { - cursor.next()?; - } - // Delete shard. - cursor.delete_current()?; - deleted += 1; + Ok(outcomes) +} + +/// Prunes one shard of a history table. +/// +/// 1. If the shard has `highest_block_number` less than or equal to the target block number for +/// pruning, delete the shard completely. +/// 2. If the shard has `highest_block_number` greater than the target block number for pruning, +/// filter block numbers inside the shard which are less than the target block number for +/// pruning. +fn prune_shard( + cursor: &mut C, + key: T::Key, + raw_blocks: RawValue, + to_block: BlockNumber, + key_matches: impl Fn(&T::Key, &T::Key) -> bool, +) -> Result +where + C: DbCursorRO> + DbCursorRW>, + T: Table, + T::Key: AsRef>, +{ + // If shard consists only of block numbers less than the target one, delete shard + // completely. 
+ if key.as_ref().highest_block_number <= to_block { + cursor.delete_current()?; + Ok(PruneShardOutcome::Deleted) + } + // Shard contains block numbers that are higher than the target one, so we need to + // filter it. It is guaranteed that further shards for this sharded key will not + // contain the target block number, as it's in this shard. + else { + let blocks = raw_blocks.value()?; + let higher_blocks = + blocks.iter().skip_while(|block| *block <= to_block).collect::>(); + + // If there were blocks less than or equal to the target one + // (so the shard has changed), update the shard. + if blocks.len() as usize != higher_blocks.len() { + // If there will be no more blocks in the shard after pruning blocks below target + // block, we need to remove it, as empty shards are not allowed. + if higher_blocks.is_empty() { + if key.as_ref().highest_block_number == u64::MAX { + let prev_row = cursor + .prev()? + .map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v))) + .transpose()?; + match prev_row { + // If current shard is the last shard for the sharded key that + // has previous shards, replace it with the previous shard. + Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => { + cursor.delete_current()?; + // Upsert will replace the last shard for this sharded key with + // the previous value. + cursor.upsert(RawKey::new(key), prev_value)?; + Ok(PruneShardOutcome::Updated) + } + // If there's no previous shard for this sharded key, + // just delete last shard completely. + _ => { + // If we successfully moved the cursor to a previous row, + // jump to the original last shard. + if prev_row.is_some() { + cursor.next()?; } + // Delete shard. + cursor.delete_current()?; + Ok(PruneShardOutcome::Deleted) } } - // If current shard is not the last shard for this sharded key, - // just delete it. 
- else { - cursor.delete_current()?; - deleted += 1; - } - } else { - cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(higher_blocks))?; } + // If current shard is not the last shard for this sharded key, + // just delete it. + else { + cursor.delete_current()?; + Ok(PruneShardOutcome::Deleted) + } + } else { + cursor.upsert( + RawKey::new(key), + RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)), + )?; + Ok(PruneShardOutcome::Updated) } - - // Jump to the last shard for this key, if current key isn't already the last shard. - if key.as_ref().highest_block_number != u64::MAX { - cursor.seek_exact(last_key(&key))?; - } + } else { + Ok(PruneShardOutcome::Unchanged) } - - processed += 1; } - - Ok((processed, deleted)) } diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 5599b8defb21..de97a2aaf6f4 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -94,13 +94,13 @@ impl Segment for Receipts { #[cfg(test)] mod tests { use crate::segments::{PruneInput, PruneOutput, Receipts, Segment}; + use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ FoldWhile::{Continue, Done}, Itertools, }; use reth_db::tables; - use reth_primitives::{BlockNumber, TxNumber, B256}; use reth_provider::PruneCheckpointReader; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, diff --git a/crates/prune/prune/src/segments/receipts_by_logs.rs b/crates/prune/prune/src/segments/receipts_by_logs.rs index e6b7596067ab..88c39beacaa1 100644 --- a/crates/prune/prune/src/segments/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/receipts_by_logs.rs @@ -216,10 +216,10 @@ impl Segment for ReceiptsByLogs { #[cfg(test)] mod tests { use crate::segments::{receipts_by_logs::ReceiptsByLogs, PruneInput, Segment}; + use alloy_primitives::B256; use 
assert_matches::assert_matches; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; - use reth_primitives::B256; use reth_provider::{PruneCheckpointReader, TransactionsProvider}; use reth_prune_types::{PruneLimiter, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/prune/prune/src/segments/sender_recovery.rs b/crates/prune/prune/src/segments/sender_recovery.rs index 3360bfa2b77c..aa045b76aa25 100644 --- a/crates/prune/prune/src/segments/sender_recovery.rs +++ b/crates/prune/prune/src/segments/sender_recovery.rs @@ -77,13 +77,13 @@ impl Segment for SenderRecovery { #[cfg(test)] mod tests { use crate::segments::{PruneInput, PruneOutput, Segment, SenderRecovery}; + use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ FoldWhile::{Continue, Done}, Itertools, }; use reth_db::tables; - use reth_primitives::{BlockNumber, TxNumber, B256}; use reth_provider::PruneCheckpointReader; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 7ff7da2be1a2..e38ffccf35cf 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -48,6 +48,10 @@ impl SegmentSet { } = prune_modes; Self::default() + // Account history + .segment_opt(account_history.map(AccountHistory::new)) + // Storage history + .segment_opt(storage_history.map(StorageHistory::new)) // Receipts .segment_opt(receipts.map(Receipts::new)) // Receipts by logs @@ -59,10 +63,6 @@ impl SegmentSet { .segment_opt(transaction_lookup.map(TransactionLookup::new)) // Sender recovery .segment_opt(sender_recovery.map(SenderRecovery::new)) - // Account history - .segment_opt(account_history.map(AccountHistory::new)) - // Storage history - 
.segment_opt(storage_history.map(StorageHistory::new))
     }
 }
diff --git a/crates/prune/prune/src/segments/storage_history.rs b/crates/prune/prune/src/segments/storage_history.rs
index 3bf0fdda3585..18a2a7af458c 100644
--- a/crates/prune/prune/src/segments/storage_history.rs
+++ b/crates/prune/prune/src/segments/storage_history.rs
@@ -4,6 +4,7 @@ use crate::{
     },
     PrunerError,
 };
+use itertools::Itertools;
 use reth_db::tables;
 use reth_db_api::{
     database::Database,
@@ -11,6 +12,7 @@ use reth_db_api::{
 };
 use reth_provider::DatabaseProviderRW;
 use reth_prune_types::{PruneInterruptReason, PruneMode, PruneProgress, PruneSegment};
+use rustc_hash::FxHashMap;
 use tracing::{instrument, trace};
 /// Number of storage history tables to prune in one step
@@ -67,34 +69,58 @@ impl Segment for StorageHistory {
         }
         let mut last_changeset_pruned_block = None;
+        // Deleted storage changeset keys (account addresses and storage slots) with the highest
+        // block number deleted for that key.
+        //
+        // The size of this map is limited by `prune_delete_limit * blocks_since_last_run /
+        // STORAGE_HISTORY_TABLES_TO_PRUNE`, and with current default it's usually `3500 * 5
+        // / 2`, so 8750 entries. Each entry is `160 bit + 256 bit + 64 bit`, so the total
+        // size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is
+        // additionally limited by the `max_reorg_depth`, so no OOM is expected here.
+ let mut highest_deleted_storages = FxHashMap::default(); let (pruned_changesets, done) = provider .prune_table_with_range::( BlockNumberAddress::range(range), &mut limiter, |_| false, - |row| last_changeset_pruned_block = Some(row.0.block_number()), + |(BlockNumberAddress((block_number, address)), entry)| { + highest_deleted_storages.insert((address, entry.key), block_number); + last_changeset_pruned_block = Some(block_number); + }, )?; trace!(target: "pruner", deleted = %pruned_changesets, %done, "Pruned storage history (changesets)"); let last_changeset_pruned_block = last_changeset_pruned_block - // If there's more storage storage changesets to prune, set the checkpoint block number - // to previous, so we could finish pruning its storage changesets on the next run. + // If there's more storage changesets to prune, set the checkpoint block number to + // previous, so we could finish pruning its storage changesets on the next run. .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) .unwrap_or(range_end); - let (processed, pruned_indices) = prune_history_indices::( + // Sort highest deleted block numbers by account address and storage key and turn them into + // sharded keys. + // We did not use `BTreeMap` from the beginning, because it's inefficient for hashes. 
+ let highest_sharded_keys = highest_deleted_storages + .into_iter() + .sorted_unstable() // Unstable is fine because no equal keys exist in the map + .map(|((address, storage_key), block_number)| { + StorageShardedKey::new( + address, + storage_key, + block_number.min(last_changeset_pruned_block), + ) + }); + let outcomes = prune_history_indices::( provider, - last_changeset_pruned_block, + highest_sharded_keys, |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, - |key| StorageShardedKey::last(key.address, key.sharded_key.key), )?; - trace!(target: "pruner", %processed, deleted = %pruned_indices, %done, "Pruned storage history (history)"); + trace!(target: "pruner", ?outcomes, %done, "Pruned storage history (indices)"); let progress = PruneProgress::new(done, &limiter); Ok(PruneOutput { progress, - pruned: pruned_changesets + pruned_indices, + pruned: pruned_changesets + outcomes.deleted, checkpoint: Some(PruneOutputCheckpoint { block_number: Some(last_changeset_pruned_block), tx_number: None, @@ -109,9 +135,9 @@ mod tests { storage_history::STORAGE_HISTORY_TABLES_TO_PRUNE, PruneInput, PruneOutput, Segment, StorageHistory, }; + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; - use reth_primitives::{BlockNumber, B256}; use reth_provider::PruneCheckpointReader; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/prune/prune/src/segments/transaction_lookup.rs b/crates/prune/prune/src/segments/transaction_lookup.rs index 478fc0e4d223..22b20c925c34 100644 --- a/crates/prune/prune/src/segments/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/transaction_lookup.rs @@ -104,13 +104,13 @@ impl Segment for TransactionLookup { #[cfg(test)] mod tests { use crate::segments::{PruneInput, PruneOutput, Segment, TransactionLookup}; + use 
alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ FoldWhile::{Continue, Done}, Itertools, }; use reth_db::tables; - use reth_primitives::{BlockNumber, TxNumber, B256}; use reth_provider::PruneCheckpointReader; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, diff --git a/crates/prune/prune/src/segments/transactions.rs b/crates/prune/prune/src/segments/transactions.rs index 751a374281ca..4a30808cbc50 100644 --- a/crates/prune/prune/src/segments/transactions.rs +++ b/crates/prune/prune/src/segments/transactions.rs @@ -76,13 +76,13 @@ impl Segment for Transactions { #[cfg(test)] mod tests { use crate::segments::{PruneInput, PruneOutput, Segment, Transactions}; + use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ FoldWhile::{Continue, Done}, Itertools, }; use reth_db::tables; - use reth_primitives::{BlockNumber, TxNumber, B256}; use reth_provider::PruneCheckpointReader; use reth_prune_types::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index 3895ebcd05c0..4fd5b9336812 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -26,6 +26,7 @@ arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true proptest-derive.workspace = true +proptest-arbitrary-interop.workspace = true serde_json.workspace = true test-fuzz.workspace = true -toml.workspace = true \ No newline at end of file +toml.workspace = true diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 34d74614f2c1..82563010f165 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), 
warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod checkpoint; diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index cf7c0f5fa532..bbb60b293edd 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-storage-errors.workspace = true reth-execution-errors.workspace = true @@ -26,7 +27,6 @@ revm.workspace = true # alloy alloy-eips.workspace = true -alloy-rlp.workspace = true # common tracing.workspace = true @@ -35,4 +35,8 @@ tracing.workspace = true reth-trie.workspace = true [features] +default = ["std", "c-kzg"] +std = [] +c-kzg = ["revm/c-kzg"] test-utils = ["dep:reth-trie"] +optimism = ["revm/optimism"] diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 7b1980cdc22c..87d9898c803b 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,13 +1,17 @@ //! Helper for handling execution of multiple blocks. use crate::{precompile::Address, primitives::alloy_primitives::BlockNumber}; +use core::time::Duration; use reth_execution_errors::BlockExecutionError; use reth_primitives::{Receipt, Receipts, Request, Requests}; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; -use std::time::Duration; +use std::collections::HashSet; use tracing::debug; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + /// Takes care of: /// - recording receipts during execution of multiple blocks. /// - pruning receipts according to the pruning configuration. @@ -30,9 +34,10 @@ pub struct BlockBatchRecord { /// guaranteed to be the same as the number of transactions. requests: Vec, /// Memoized address pruning filter. 
+ /// /// Empty implies that there is going to be addresses to include in the filter in a future /// block. None means there isn't any kind of configuration. - pruning_address_filter: Option<(u64, Vec
)>, + pruning_address_filter: Option<(u64, HashSet
)>, /// First block will be initialized to `None` /// and be set to the block number of first block executed. first_block: Option, @@ -78,7 +83,7 @@ impl BlockBatchRecord { /// Returns all recorded receipts. pub fn take_receipts(&mut self) -> Receipts { - std::mem::take(&mut self.receipts) + core::mem::take(&mut self.receipts) } /// Returns the recorded requests. @@ -88,7 +93,7 @@ impl BlockBatchRecord { /// Returns all recorded requests. pub fn take_requests(&mut self) -> Vec { - std::mem::take(&mut self.requests) + core::mem::take(&mut self.requests) } /// Returns the [`BundleRetention`] for the given block based on the configured prune modes. @@ -124,10 +129,7 @@ &mut self, receipts: &mut Vec>, ) -> Result<(), PruneSegmentError> { - let (first_block, tip) = match self.first_block.zip(self.tip) { - Some((block, tip)) => (block, tip), - _ => return Ok(()), - }; + let (Some(first_block), Some(tip)) = (self.first_block, self.tip) else { return Ok(()) }; let block_number = first_block + self.receipts.len() as u64; @@ -151,18 +153,18 @@ let contract_log_pruner = self.prune_modes.receipts_log_filter.group_by_block(tip, None)?; if !contract_log_pruner.is_empty() { - let (prev_block, filter) = self.pruning_address_filter.get_or_insert((0, Vec::new())); + let (prev_block, filter) = + self.pruning_address_filter.get_or_insert_with(|| (0, HashSet::new())); for (_, addresses) in contract_log_pruner.range(*prev_block..=block_number) { filter.extend(addresses.iter().copied()); } } - for receipt in receipts.iter_mut() { - let inner_receipt = receipt.as_ref().expect("receipts have not been pruned"); - - // If there is an address_filter, and it does not contain any of the - // contract addresses, then remove this receipts - if let Some((_, filter)) = &self.pruning_address_filter { + if let Some((_, filter)) = &self.pruning_address_filter { + for receipt in receipts.iter_mut() { + // If there is an address_filter, and it does not contain
any of the + // contract addresses, then remove this receipt. + let inner_receipt = receipt.as_ref().expect("receipts have not been pruned"); if !inner_receipt.logs.iter().any(|log| filter.contains(&log.address)) { receipt.take(); } @@ -208,3 +210,189 @@ impl BlockExecutorStats { ); } } + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::{Address, Log, Receipt}; + use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; + use std::collections::BTreeMap; + + #[test] + fn test_save_receipts_empty() { + let mut recorder = BlockBatchRecord::default(); + // Create an empty vector of receipts + let receipts = vec![]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the saved receipts are equal to a nested empty vector + assert_eq!(*recorder.receipts(), vec![vec![]].into()); + } + + #[test] + fn test_save_receipts_non_empty_no_pruning() { + let mut recorder = BlockBatchRecord::default(); + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the first block contains one receipt + assert_eq!(recorder.receipts()[0].len(), 1); + // Verify that the saved receipt is the default receipt + assert_eq!(recorder.receipts()[0][0], Some(Receipt::default())); + } + + #[test] + fn test_save_receipts_with_pruning_no_prunable_receipts() { + let mut recorder = BlockBatchRecord::default(); + + // Set the first block number + recorder.set_first_block(1); + // Set the tip (highest known block) + recorder.set_tip(130); + + // Create a vector of receipts with a default receipt + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + 
assert_eq!(recorder.receipts().len(), 1); + // Verify that the first block contains one receipt + assert_eq!(recorder.receipts()[0].len(), 1); + // Verify that the saved receipt is the default receipt + assert_eq!(recorder.receipts()[0][0], Some(Receipt::default())); + } + + #[test] + fn test_save_receipts_with_pruning_no_tip() { + // Create a PruneModes with receipts set to PruneMode::Full + let prune_modes = PruneModes { receipts: Some(PruneMode::Full), ..Default::default() }; + + let mut recorder = BlockBatchRecord::new(prune_modes); + + // Set the first block number + recorder.set_first_block(1); + // Create a vector of receipts with a default receipt + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the first block contains one receipt + assert_eq!(recorder.receipts()[0].len(), 1); + // Verify that the saved receipt is the default receipt + assert_eq!(recorder.receipts()[0][0], Some(Receipt::default())); + } + + #[test] + fn test_save_receipts_with_pruning_no_block_number() { + // Create a PruneModes with receipts set to PruneMode::Full + let prune_modes = PruneModes { receipts: Some(PruneMode::Full), ..Default::default() }; + + // Create a BlockBatchRecord with the prune_modes + let mut recorder = BlockBatchRecord::new(prune_modes); + + // Set the tip (highest known block) + recorder.set_tip(130); + + // Create a vector of receipts with a default receipt + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the first block contains one receipt + assert_eq!(recorder.receipts()[0].len(), 1); + // Verify that the saved receipt is the default 
receipt + assert_eq!(recorder.receipts()[0][0], Some(Receipt::default())); + } + + // Test saving receipts with pruning configuration and receipts should be pruned + #[test] + fn test_save_receipts_with_pruning_should_prune() { + // Create a PruneModes with receipts set to PruneMode::Full + let prune_modes = PruneModes { receipts: Some(PruneMode::Full), ..Default::default() }; + + // Create a BlockBatchRecord with the prune_modes + let mut recorder = BlockBatchRecord::new(prune_modes); + + // Set the first block number + recorder.set_first_block(1); + // Set the tip (highest known block) + recorder.set_tip(130); + + // Create a vector of receipts with a default receipt + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the receipts are pruned (empty) + assert!(recorder.receipts()[0].is_empty()); + } + + // Test saving receipts with address filter pruning + #[test] + fn test_save_receipts_with_address_filter_pruning() { + // Create a PruneModes with receipts_log_filter configuration + let prune_modes = PruneModes { + receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ + (Address::with_last_byte(1), PruneMode::Before(1300001)), + (Address::with_last_byte(2), PruneMode::Before(1300002)), + (Address::with_last_byte(3), PruneMode::Distance(1300003)), + ])), + ..Default::default() + }; + + // Create a BlockBatchRecord with the prune_modes + let mut recorder = BlockBatchRecord::new(prune_modes); + + // Set the first block number + recorder.set_first_block(1); + // Set the tip (highest known block) + recorder.set_tip(1300000); + + // With a receipt that should be pruned (address 4 not in the log filter) + let mut receipt = Receipt::default(); + receipt.logs.push(Log { address: Address::with_last_byte(4), ..Default::default() }); + let receipts = 
vec![receipt.clone()]; + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the receipts are pruned (empty) + assert_eq!(recorder.receipts().len(), 1); + assert_eq!(recorder.receipts()[0], vec![None]); + + // With a receipt that should not be pruned (address 1 in the log filter) + let mut receipt1 = Receipt::default(); + receipt1.logs.push(Log { address: Address::with_last_byte(1), ..Default::default() }); + let receipts = vec![receipt1.clone()]; + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the second block of receipts contains the receipt + assert_eq!(recorder.receipts().len(), 2); + assert_eq!(recorder.receipts()[1][0], Some(receipt1)); + + // With a receipt that should not be pruned (address 2 in the log filter) + let mut receipt2 = Receipt::default(); + receipt2.logs.push(Log { address: Address::with_last_byte(2), ..Default::default() }); + let receipts = vec![receipt2.clone()]; + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the third block of receipts contains the receipt + assert_eq!(recorder.receipts().len(), 3); + assert_eq!(recorder.receipts()[2][0], Some(receipt2)); + + // With a receipt that should not be pruned (address 3 in the log filter) + let mut receipt3 = Receipt::default(); + receipt3.logs.push(Log { address: Address::with_last_byte(3), ..Default::default() }); + let receipts = vec![receipt3.clone()]; + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the fourth block of receipts contains the receipt + assert_eq!(recorder.receipts().len(), 4); + assert_eq!(recorder.receipts()[3][0], Some(receipt3)); + } +} diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index fc40f474a2fb..5edd76bea4da 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,12 +1,12 @@ use crate::primitives::alloy_primitives::{BlockNumber, StorageKey, StorageValue}; -use reth_primitives::{Account, Address, B256, KECCAK_EMPTY, U256}; +use 
core::ops::{Deref, DerefMut}; +use reth_primitives::{Account, Address, B256, U256}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use revm::{ db::DatabaseRef, primitives::{AccountInfo, Bytecode}, Database, }; -use std::ops::{Deref, DerefMut}; /// A helper trait responsible for providing that necessary state for the EVM execution. /// @@ -121,7 +121,7 @@ impl Database for StateProviderDatabase { /// /// Returns `Ok` with the block hash if found, or the default hash otherwise. /// Note: It safely casts the `number` to `u64`. - fn block_hash(&mut self, number: U256) -> Result { + fn block_hash(&mut self, number: u64) -> Result { DatabaseRef::block_hash_ref(self, number) } } @@ -134,12 +134,7 @@ impl DatabaseRef for StateProviderDatabase { /// Returns `Ok` with `Some(AccountInfo)` if the account exists, /// `None` if it doesn't, or an error if encountered. fn basic_ref(&self, address: Address) -> Result, Self::Error> { - Ok(self.basic_account(address)?.map(|account| AccountInfo { - balance: account.balance, - nonce: account.nonce, - code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), - code: None, - })) + Ok(self.basic_account(address)?.map(Into::into)) } /// Retrieves the bytecode associated with a given code hash. @@ -159,14 +154,8 @@ impl DatabaseRef for StateProviderDatabase { /// Retrieves the block hash for a given block number. /// /// Returns `Ok` with the block hash if found, or the default hash otherwise. 
- fn block_hash_ref(&self, number: U256) -> Result { - // Attempt to convert U256 to u64 - let block_number = match number.try_into() { - Ok(value) => value, - Err(_) => return Err(Self::Error::BlockNumberOverflow(number)), - }; - - // Get the block hash or default hash - Ok(self.0.block_hash(block_number)?.unwrap_or_default()) + fn block_hash_ref(&self, number: u64) -> Result { + // Get the block hash or default hash; the number is already a u64 so no conversion is needed + Ok(self.0.block_hash(number)?.unwrap_or_default()) + } } diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 8e5419567010..4fb6c30d1c50 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -7,6 +7,10 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc; /// Contains glue code for integrating reth database into revm's [Database]. pub mod database; diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index b57884cf1a75..0d64e3fc4874 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,74 +1,61 @@ -use alloy_eips::{ - eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, - eip7002::WithdrawalRequest, -}; -use alloy_rlp::Buf; +use alloy_eips::eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus_common::calc; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{ - revm::env::{ - fill_tx_env_with_beacon_root_contract_call, - fill_tx_env_with_withdrawal_requests_contract_call, - }, - Address, ChainSpec, Header, Request, Withdrawal, B256, U256, -}; +use reth_primitives::{Address, Block, Withdrawal, Withdrawals, B256, U256}; use reth_storage_errors::provider::ProviderError; use revm::{ - interpreter::Host, - primitives::{ - Account,
AccountInfo, Bytecode, EvmStorageSlot, ExecutionResult, FixedBytes, ResultAndState, - }, - Database, DatabaseCommit, Evm, + primitives::{Account, AccountInfo, Bytecode, EvmStorageSlot, BLOCKHASH_SERVE_WINDOW}, + Database, DatabaseCommit, }; + +// reuse revm's hashbrown implementation for no-std +#[cfg(not(feature = "std"))] +use crate::precompile::HashMap; +#[cfg(not(feature = "std"))] +use alloc::{boxed::Box, format, string::ToString, vec::Vec}; + +#[cfg(feature = "std")] use std::collections::HashMap; /// Collect all balance changes at the end of the block. /// /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular /// state changes (DAO fork). -#[allow(clippy::too_many_arguments)] #[inline] pub fn post_block_balance_increments( chain_spec: &ChainSpec, - block_number: u64, - block_difficulty: U256, - beneficiary: Address, - block_timestamp: u64, + block: &Block, total_difficulty: U256, - ommers: &[Header], - withdrawals: Option<&[Withdrawal]>, ) -> HashMap { let mut balance_increments = HashMap::new(); // Add block rewards if they are enabled. 
if let Some(base_block_reward) = - calc::base_block_reward(chain_spec, block_number, block_difficulty, total_difficulty) + calc::base_block_reward(chain_spec, block.number, block.difficulty, total_difficulty) { // Ommer rewards - for ommer in ommers { + for ommer in &block.ommers { *balance_increments.entry(ommer.beneficiary).or_default() += - calc::ommer_reward(base_block_reward, block_number, ommer.number); + calc::ommer_reward(base_block_reward, block.number, ommer.number); } // Full block reward - *balance_increments.entry(beneficiary).or_default() += - calc::block_reward(base_block_reward, ommers.len()); + *balance_increments.entry(block.beneficiary).or_default() += + calc::block_reward(base_block_reward, block.ommers.len()); } // process withdrawals insert_post_block_withdrawals_balance_increments( chain_spec, - block_timestamp, - withdrawals, + block.timestamp, + block.withdrawals.as_ref().map(Withdrawals::as_ref), &mut balance_increments, ); balance_increments } -/// todo: temporary move over of constants from revm until we've migrated to the latest version -pub const HISTORY_SERVE_WINDOW: u64 = 8192; - /// Applies the pre-block state change outlined in [EIP-2935] to store historical blockhashes in a /// system contract. /// @@ -79,7 +66,7 @@ pub const HISTORY_SERVE_WINDOW: u64 = 8192; /// /// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 #[inline] -pub fn apply_blockhashes_update + DatabaseCommit>( +pub fn apply_blockhashes_update> + DatabaseCommit>( db: &mut DB, chain_spec: &ChainSpec, block_timestamp: u64, @@ -87,7 +74,7 @@ pub fn apply_blockhashes_update + DatabaseCo parent_block_hash: B256, ) -> Result<(), BlockExecutionError> where - DB::Error: std::fmt::Display, + DB::Error: core::fmt::Display, { // If Prague is not activated or this is the genesis block, no hashes are added. if !chain_spec.is_prague_active_at_timestamp(block_timestamp) || block_number == 0 { @@ -101,7 +88,7 @@ where // nonce of 1, so it does not get deleted. 
let mut account: Account = db .basic(HISTORY_STORAGE_ADDRESS) - .map_err(BlockValidationError::BlockHashAccountLoadingFailed)? + .map_err(|err| BlockValidationError::BlockHashAccountLoadingFailed(err.into()))? .unwrap_or_else(|| AccountInfo { nonce: 1, code: Some(Bytecode::new_raw(HISTORY_STORAGE_CODE.clone())), @@ -125,85 +112,19 @@ where /// /// This calculates the correct storage slot in the `BLOCKHASH` history storage address, fetches the /// blockhash and creates a [`EvmStorageSlot`] with appropriate previous and new values. -fn eip2935_block_hash_slot>( +fn eip2935_block_hash_slot>>( db: &mut DB, block_number: u64, block_hash: B256, ) -> Result<(U256, EvmStorageSlot), BlockValidationError> { - let slot = U256::from(block_number % HISTORY_SERVE_WINDOW); + let slot = U256::from(block_number % BLOCKHASH_SERVE_WINDOW as u64); let current_hash = db .storage(HISTORY_STORAGE_ADDRESS, slot) - .map_err(BlockValidationError::BlockHashAccountLoadingFailed)?; + .map_err(|err| BlockValidationError::BlockHashAccountLoadingFailed(err.into()))?; Ok((slot, EvmStorageSlot::new_changed(current_hash, block_hash.into()))) } -/// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, -/// [`ChainSpec`], EVM. -/// -/// If Cancun is not activated or the block is the genesis block, then this is a no-op, and no -/// state changes are made. 
-/// -/// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 -#[inline] -pub fn apply_beacon_root_contract_call( - chain_spec: &ChainSpec, - block_timestamp: u64, - block_number: u64, - parent_beacon_block_root: Option, - evm: &mut Evm<'_, EXT, DB>, -) -> Result<(), BlockExecutionError> -where - DB::Error: std::fmt::Display, -{ - if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { - return Ok(()); - } - - let parent_beacon_block_root = - parent_beacon_block_root.ok_or(BlockValidationError::MissingParentBeaconBlockRoot)?; - - // if the block number is zero (genesis block) then the parent beacon block root must - // be 0x0 and no system transaction may occur as per EIP-4788 - if block_number == 0 { - if parent_beacon_block_root != B256::ZERO { - return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero { - parent_beacon_block_root, - } - .into()); - } - return Ok(()); - } - - // get previous env - let previous_env = Box::new(evm.context.env().clone()); - - // modify env for pre block call - fill_tx_env_with_beacon_root_contract_call(&mut evm.context.evm.env, parent_beacon_block_root); - - let mut state = match evm.transact() { - Ok(res) => res.state, - Err(e) => { - evm.context.evm.env = previous_env; - return Err(BlockValidationError::BeaconRootContractCall { - parent_beacon_block_root: Box::new(parent_beacon_block_root), - message: e.to_string(), - } - .into()); - } - }; - - state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); - state.remove(&evm.block().coinbase); - - evm.context.evm.db.commit(state); - - // re-set the previous env - evm.context.evm.env = previous_env; - - Ok(()) -} - /// Returns a map of addresses to their balance increments if the Shanghai hardfork is active at the /// given timestamp. /// @@ -247,89 +168,3 @@ pub fn insert_post_block_withdrawals_balance_increments( } } } - -/// Applies the post-block call to the EIP-7002 withdrawal requests contract. 
-/// -/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is -/// returned. Otherwise, the withdrawal requests are returned. -#[inline] -pub fn apply_withdrawal_requests_contract_call( - evm: &mut Evm<'_, EXT, DB>, -) -> Result, BlockExecutionError> -where - DB::Error: std::fmt::Display, -{ - // get previous env - let previous_env = Box::new(evm.context.env().clone()); - - // modify env for pre block call - fill_tx_env_with_withdrawal_requests_contract_call(&mut evm.context.evm.env); - - let ResultAndState { result, mut state } = match evm.transact() { - Ok(res) => res, - Err(e) => { - evm.context.evm.env = previous_env; - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution failed: {e}"), - } - .into()); - } - }; - - // cleanup the state - state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); - state.remove(&evm.block().coinbase); - evm.context.evm.db.commit(state); - - // re-set the previous env - evm.context.evm.env = previous_env; - - let mut data = match result { - ExecutionResult::Success { output, .. } => Ok(output.into_data()), - ExecutionResult::Revert { output, .. } => { - Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution reverted: {output}"), - }) - } - ExecutionResult::Halt { reason, .. 
} => { - Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Withdrawals are encoded as a series of withdrawal requests, each with the following - // format: - // - // +------+--------+--------+ - // | addr | pubkey | amount | - // +------+--------+--------+ - // 20 48 8 - - const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; - let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < WITHDRAWAL_REQUEST_SIZE { - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: "invalid withdrawal request length".to_string(), - } - .into()); - } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut validator_public_key = FixedBytes::<48>::ZERO; - data.copy_to_slice(validator_public_key.as_mut_slice()); - - let amount = data.get_u64(); - - withdrawal_requests.push(Request::WithdrawalRequest(WithdrawalRequest { - source_address, - validator_public_key, - amount, - })); - } - - Ok(withdrawal_requests) -} diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index bfab663dafff..0459cf679ec0 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,10 +1,11 @@ use reth_primitives::{ - keccak256, proofs::AccountProof, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, - B256, U256, + keccak256, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, +}; +use reth_storage_api::{ + AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, }; -use reth_storage_api::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; use reth_storage_errors::provider::ProviderResult; -use reth_trie::updates::TrieUpdates; +use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::db::BundleState; use std::collections::HashMap; @@ -77,6 +78,17 @@ 
impl StateRootProvider for StateProviderTest { } } +impl StateProofProvider for StateProviderTest { + fn proof( + &self, + _state: &BundleState, + _address: Address, + _slots: &[B256], + ) -> ProviderResult { + unimplemented!("proof generation is not supported") + } +} + impl StateProvider for StateProviderTest { fn storage( &self, @@ -89,8 +101,4 @@ impl StateProvider for StateProviderTest { fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { Ok(self.contracts.get(&code_hash).cloned()) } - - fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { - unimplemented!("proof generation is not supported") - } } diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index af6e64db19c3..42bb64d53c7a 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -29,7 +29,7 @@ tracing.workspace = true bytes.workspace = true thiserror.workspace = true futures-util = "0.3.30" -interprocess = { version = "1.2.1", features = ["tokio_support"] } +interprocess = { version = "2.2.0", features = ["tokio"] } [dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs index 05ea7ed589d5..e8eff9c8f454 100644 --- a/crates/rpc/ipc/src/client/mod.rs +++ b/crates/rpc/ipc/src/client/mod.rs @@ -1,23 +1,23 @@ //! [`jsonrpsee`] transport adapter implementation for IPC. 
use crate::stream_codec::StreamCodec; -use futures::StreamExt; -use interprocess::local_socket::tokio::{LocalSocketStream, OwnedReadHalf, OwnedWriteHalf}; +use futures::{StreamExt, TryFutureExt}; +use interprocess::local_socket::{ + tokio::{prelude::*, RecvHalf, SendHalf}, + GenericFilePath, +}; use jsonrpsee::{ async_client::{Client, ClientBuilder}, core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}, }; use std::io; use tokio::io::AsyncWriteExt; -use tokio_util::{ - codec::FramedRead, - compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt}, -}; +use tokio_util::codec::FramedRead; /// Sending end of IPC transport. #[derive(Debug)] pub(crate) struct Sender { - inner: Compat, + inner: SendHalf, } #[async_trait::async_trait] @@ -44,7 +44,7 @@ impl TransportSenderT for Sender { /// Receiving end of IPC transport. #[derive(Debug)] pub(crate) struct Receiver { - pub(crate) inner: FramedRead, StreamCodec>, + pub(crate) inner: FramedRead, } #[async_trait::async_trait] @@ -63,20 +63,17 @@ impl TransportReceiverT for Receiver { pub(crate) struct IpcTransportClientBuilder; impl IpcTransportClientBuilder { - pub(crate) async fn build( - self, - endpoint: impl AsRef, - ) -> Result<(Sender, Receiver), IpcError> { - let endpoint = endpoint.as_ref().to_string(); - let conn = LocalSocketStream::connect(endpoint.clone()) + pub(crate) async fn build(self, path: &str) -> Result<(Sender, Receiver), IpcError> { + let conn = async { path.to_fs_name::() } + .and_then(LocalSocketStream::connect) .await - .map_err(|err| IpcError::FailedToConnect { path: endpoint, err })?; + .map_err(|err| IpcError::FailedToConnect { path: path.to_string(), err })?; - let (rhlf, whlf) = conn.into_split(); + let (recv, send) = conn.split(); Ok(( - Sender { inner: whlf.compat_write() }, - Receiver { inner: FramedRead::new(rhlf.compat(), StreamCodec::stream_incoming()) }, + Sender { inner: send }, + Receiver { inner: FramedRead::new(recv, StreamCodec::stream_incoming()) }, 
)) } } @@ -92,14 +89,14 @@ impl IpcClientBuilder { /// ``` /// use jsonrpsee::{core::client::ClientT, rpc_params}; /// use reth_ipc::client::IpcClientBuilder; + /// /// # async fn run_client() -> Result<(), Box> { /// let client = IpcClientBuilder::default().build("/tmp/my-uds").await?; /// let response: String = client.request("say_hello", rpc_params![]).await?; - /// # Ok(()) - /// # } + /// # Ok(()) } /// ``` - pub async fn build(self, path: impl AsRef) -> Result { - let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; + pub async fn build(self, name: &str) -> Result { + let (tx, rx) = IpcTransportClientBuilder::default().build(name).await?; Ok(self.build_with_tokio(tx, rx)) } @@ -139,20 +136,24 @@ pub enum IpcError { #[cfg(test)] mod tests { - use crate::server::dummy_endpoint; - use interprocess::local_socket::tokio::LocalSocketListener; + use interprocess::local_socket::ListenerOptions; use super::*; + use crate::server::dummy_name; #[tokio::test] async fn test_connect() { - let endpoint = dummy_endpoint(); - let binding = LocalSocketListener::bind(endpoint.clone()).unwrap(); + let name = &dummy_name(); + + let binding = ListenerOptions::new() + .name(name.as_str().to_fs_name::().unwrap()) + .create_tokio() + .unwrap(); tokio::spawn(async move { let _x = binding.accept().await; }); - let (tx, rx) = IpcTransportClientBuilder::default().build(endpoint).await.unwrap(); + let (tx, rx) = IpcTransportClientBuilder::default().build(name).await.unwrap(); let _ = IpcClientBuilder::default().build_with_tokio(tx, rx); } } diff --git a/crates/rpc/ipc/src/server/future.rs b/crates/rpc/ipc/src/server/future.rs deleted file mode 100644 index 85c69c2a64b0..000000000000 --- a/crates/rpc/ipc/src/server/future.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Utilities for handling async code. - -use std::sync::Arc; -use tokio::sync::watch; - -#[derive(Debug, Clone)] -pub(crate) struct StopHandle(watch::Receiver<()>); - -impl StopHandle { - pub(crate) fn new(rx: watch::Receiver<()>) -> Self { - Self(rx) - } - - pub(crate) async fn shutdown(mut self) { - // Err(_) implies that the `sender` has been dropped. - // Ok(_) implies that `stop` has been called. - let _ = self.0.changed().await; - } -} - -/// Server handle. -/// -/// When all [`StopHandle`]'s have been `dropped` or `stop` has been called -/// the server will be stopped. -#[derive(Debug, Clone)] -pub(crate) struct ServerHandle(Arc>); - -impl ServerHandle { - /// Wait for the server to stop. 
- #[allow(dead_code)] - pub(crate) async fn stopped(self) { - self.0.closed().await - } -} diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 265f1d1a67cf..6dff8a8afae0 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -1,18 +1,19 @@ //! JSON-RPC IPC server implementation -use crate::server::{ - connection::{IpcConn, JsonRpcStream}, - future::StopHandle, -}; +use crate::server::connection::{IpcConn, JsonRpcStream}; use futures::StreamExt; -use futures_util::{future::Either, AsyncWriteExt}; -use interprocess::local_socket::tokio::{LocalSocketListener, LocalSocketStream}; +use futures_util::future::Either; +use interprocess::local_socket::{ + tokio::prelude::{LocalSocketListener, LocalSocketStream}, + traits::tokio::{Listener, Stream}, + GenericFilePath, ListenerOptions, ToFsName, +}; use jsonrpsee::{ core::TEN_MB_SIZE_BYTES, server::{ middleware::rpc::{RpcLoggerLayer, RpcServiceT}, - AlreadyStoppedError, ConnectionGuard, ConnectionPermit, IdProvider, - RandomIntegerIdProvider, + stop_channel, ConnectionGuard, ConnectionPermit, IdProvider, RandomIntegerIdProvider, + ServerHandle, StopHandle, }, BoundedSubscriptions, MethodSink, Methods, }; @@ -24,8 +25,8 @@ use std::{ task::{Context, Poll}, }; use tokio::{ - io::{AsyncRead, AsyncWrite}, - sync::{oneshot, watch}, + io::{AsyncRead, AsyncWrite, AsyncWriteExt}, + sync::oneshot, }; use tower::{layer::util::Identity, Layer, Service}; use tracing::{debug, instrument, trace, warn, Instrument}; @@ -39,11 +40,9 @@ use crate::{ }; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; -use tokio_util::compat::FuturesAsyncReadCompatExt; use tower::layer::{util::Stack, LayerFn}; mod connection; -mod future; mod ipc; mod rpc_service; @@ -68,17 +67,17 @@ impl IpcServer { impl IpcServer where - RpcMiddleware: Layer + Clone + Send + 'static, - for<'a> >::Service: RpcServiceT<'a>, - HttpMiddleware: Layer> + Send + 'static, - >>::Service: Send - + 
Service< - String, - Response = Option, - Error = Box, - >, - <>>::Service as Service>::Future: - Send + Unpin, + RpcMiddleware: for<'a> Layer> + Clone + Send + 'static, + HttpMiddleware: Layer< + TowerServiceNoHttp, + Service: Service< + String, + Response = Option, + Error = Box, + Future: Send + Unpin, + > + Send, + > + Send + + 'static, { /// Start responding to connections requests. /// @@ -89,7 +88,7 @@ where /// use jsonrpsee::RpcModule; /// use reth_ipc::server::Builder; /// async fn run_server() -> Result<(), Box> { - /// let server = Builder::default().build("/tmp/my-uds"); + /// let server = Builder::default().build("/tmp/my-uds".into()); /// let mut module = RpcModule::new(()); /// module.register_method("say_hello", |_, _, _| "lo")?; /// let handle = server.start(module).await?; @@ -106,9 +105,8 @@ where methods: impl Into, ) -> Result { let methods = methods.into(); - let (stop_tx, stop_rx) = watch::channel(()); - let stop_handle = StopHandle::new(stop_rx); + let (stop_handle, server_handle) = stop_channel(); // use a signal channel to wait until we're ready to accept connections let (tx, rx) = oneshot::channel(); @@ -119,7 +117,7 @@ where }; rx.await.expect("channel is open")?; - Ok(ServerHandle::new(stop_tx)) + Ok(server_handle) } async fn start_inner( @@ -137,15 +135,19 @@ where } } - let listener = match LocalSocketListener::bind(self.endpoint.clone()) { + let listener = match self + .endpoint + .as_str() + .to_fs_name::() + .and_then(|name| ListenerOptions::new().name(name).create_tokio()) + { + Ok(listener) => listener, Err(err) => { on_ready .send(Err(IpcServerStartError { endpoint: self.endpoint.clone(), source: err })) .ok(); return; } - - Ok(listener) => listener, }; // signal that we're ready to accept connections @@ -164,9 +166,10 @@ where match try_accept_conn(&listener, stopped).await { AcceptConnection::Established { local_socket_stream, stop } => { let Some(conn_permit) = connection_guard.try_acquire() else { - let (mut _reader, mut 
writer) = local_socket_stream.into_split(); - let _ = writer.write_all(b"Too many connections. Please try again later.").await; - drop((_reader, writer)); + let (_reader, mut writer) = local_socket_stream.split(); + let _ = writer + .write_all(b"Too many connections. Please try again later.") + .await; stopped = stop; continue; }; @@ -177,7 +180,7 @@ where let conn_permit = Arc::new(conn_permit); - process_connection(ProcessConnection{ + process_connection(ProcessConnection { http_middleware: &self.http_middleware, rpc_middleware: self.rpc_middleware.clone(), conn_permit, @@ -193,9 +196,11 @@ where id = id.wrapping_add(1); stopped = stop; } - AcceptConnection::Shutdown => { break; } - AcceptConnection::Err((e, stop)) => { - tracing::error!("Error while awaiting a new IPC connection: {:?}", e); + AcceptConnection::Shutdown => { + break; + } + AcceptConnection::Err((err, stop)) => { + tracing::error!(%err, "Failed accepting a new IPC connection"); stopped = stop; } } @@ -204,7 +209,8 @@ where // Drop the last Sender drop(drop_on_completion); - // Once this channel is closed it is safe to assume that all connections have been gracefully shutdown + // Once this channel is closed it is safe to assume that all connections have been + // gracefully shutdown while process_connection_awaiter.recv().await.is_some() { // Generally, messages should not be sent across this channel, // but we'll loop here to wait for `None` just to be on the safe side @@ -222,10 +228,7 @@ async fn try_accept_conn(listener: &LocalSocketListener, stopped: S) -> Accep where S: Future + Unpin, { - let accept = listener.accept(); - let accept = pin!(accept); - - match futures_util::future::select(accept, stopped).await { + match futures_util::future::select(pin!(listener.accept()), stopped).await { Either::Left((res, stop)) => match res { Ok(local_socket_stream) => AcceptConnection::Established { local_socket_stream, stop }, Err(e) => AcceptConnection::Err((e, stop)), @@ -459,7 +462,7 @@ fn 
process_connection<'b, RpcMiddleware, HttpMiddleware>( let ipc = IpcConn(tokio_util::codec::Decoder::framed( StreamCodec::stream_incoming(), - local_socket_stream.compat(), + local_socket_stream, )); let (tx, rx) = mpsc::channel::(server_cfg.message_buffer_capacity as usize); @@ -682,9 +685,9 @@ impl Builder { /// #[tokio::main] /// async fn main() { /// let builder = tower::ServiceBuilder::new(); - /// - /// let server = - /// reth_ipc::server::Builder::default().set_http_middleware(builder).build("/tmp/my-uds"); + /// let server = reth_ipc::server::Builder::default() + /// .set_http_middleware(builder) + /// .build("/tmp/my-uds".into()); /// } /// ``` pub fn set_http_middleware( @@ -776,9 +779,9 @@ impl Builder { } /// Finalize the configuration of the server. Consumes the [`Builder`]. - pub fn build(self, endpoint: impl AsRef) -> IpcServer { + pub fn build(self, endpoint: String) -> IpcServer { IpcServer { - endpoint: endpoint.as_ref().to_string(), + endpoint, cfg: self.settings, id_provider: self.id_provider, http_middleware: self.http_middleware, @@ -787,38 +790,8 @@ impl Builder { } } -/// Server handle. -/// -/// When all [`jsonrpsee::server::StopHandle`]'s have been `dropped` or `stop` has been called -/// the server will be stopped. -#[derive(Debug, Clone)] -pub struct ServerHandle(Arc>); - -impl ServerHandle { - /// Create a new server handle. - pub(crate) fn new(tx: watch::Sender<()>) -> Self { - Self(Arc::new(tx)) - } - - /// Tell the server to stop without waiting for the server to stop. - pub fn stop(&self) -> Result<(), AlreadyStoppedError> { - self.0.send(()).map_err(|_| AlreadyStoppedError) - } - - /// Wait for the server to stop. - pub async fn stopped(self) { - self.0.closed().await - } - - /// Check if the server has been stopped. 
- pub fn is_stopped(&self) -> bool { - self.0.is_closed() - } -} - -/// For testing/examples #[cfg(test)] -pub fn dummy_endpoint() -> String { +pub fn dummy_name() -> String { let num: u64 = rand::Rng::gen(&mut rand::thread_rng()); if cfg!(windows) { format!(r"\\.\pipe\my-pipe-{}", num) @@ -893,8 +866,8 @@ mod tests { #[tokio::test] async fn can_set_the_max_response_body_size() { // init_test_tracing(); - let endpoint = dummy_endpoint(); - let server = Builder::default().max_response_body_size(100).build(&endpoint); + let endpoint = &dummy_name(); + let server = Builder::default().max_response_body_size(100).build(endpoint.clone()); let mut module = RpcModule::new(()); module.register_method("anything", |_, _, _| "a".repeat(101)).unwrap(); let handle = server.start(module).await.unwrap(); @@ -908,8 +881,8 @@ mod tests { #[tokio::test] async fn can_set_the_max_request_body_size() { init_test_tracing(); - let endpoint = dummy_endpoint(); - let server = Builder::default().max_request_body_size(100).build(&endpoint); + let endpoint = &dummy_name(); + let server = Builder::default().max_request_body_size(100).build(endpoint.clone()); let mut module = RpcModule::new(()); module.register_method("anything", |_, _, _| "succeed").unwrap(); let handle = server.start(module).await.unwrap(); @@ -936,16 +909,16 @@ mod tests { async fn can_set_max_connections() { init_test_tracing(); - let endpoint = dummy_endpoint(); - let server = Builder::default().max_connections(2).build(&endpoint); + let endpoint = &dummy_name(); + let server = Builder::default().max_connections(2).build(endpoint.clone()); let mut module = RpcModule::new(()); module.register_method("anything", |_, _, _| "succeed").unwrap(); let handle = server.start(module).await.unwrap(); tokio::spawn(handle.stopped()); - let client1 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); - let client2 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); - let client3 = 
IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let client1 = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let client2 = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let client3 = IpcClientBuilder::default().build(endpoint).await.unwrap(); let response1: Result = client1.request("anything", rpc_params![]).await; let response2: Result = client2.request("anything", rpc_params![]).await; @@ -961,7 +934,7 @@ mod tests { tokio::time::sleep(std::time::Duration::from_millis(100)).await; // Can connect again - let client4 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let client4 = IpcClientBuilder::default().build(endpoint).await.unwrap(); let response4: Result = client4.request("anything", rpc_params![]).await; assert!(response4.is_ok()); } @@ -969,8 +942,8 @@ mod tests { #[tokio::test] async fn test_rpc_request() { init_test_tracing(); - let endpoint = dummy_endpoint(); - let server = Builder::default().build(&endpoint); + let endpoint = &dummy_name(); + let server = Builder::default().build(endpoint.clone()); let mut module = RpcModule::new(()); let msg = r#"{"jsonrpc":"2.0","id":83,"result":"0x7a69"}"#; module.register_method("eth_chainId", move |_, _, _| msg).unwrap(); @@ -984,8 +957,8 @@ mod tests { #[tokio::test] async fn test_batch_request() { - let endpoint = dummy_endpoint(); - let server = Builder::default().build(&endpoint); + let endpoint = &dummy_name(); + let server = Builder::default().build(endpoint.clone()); let mut module = RpcModule::new(()); module.register_method("anything", |_, _, _| "ok").unwrap(); let handle = server.start(module).await.unwrap(); @@ -1009,8 +982,8 @@ mod tests { #[tokio::test] async fn test_ipc_modules() { reth_tracing::init_test_tracing(); - let endpoint = dummy_endpoint(); - let server = Builder::default().build(&endpoint); + let endpoint = &dummy_name(); + let server = Builder::default().build(endpoint.clone()); let mut module = RpcModule::new(()); 
let msg = r#"{"admin":"1.0","debug":"1.0","engine":"1.0","eth":"1.0","ethash":"1.0","miner":"1.0","net":"1.0","rpc":"1.0","txpool":"1.0","web3":"1.0"}"#; module.register_method("rpc_modules", move |_, _, _| msg).unwrap(); @@ -1024,8 +997,8 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_rpc_subscription() { - let endpoint = dummy_endpoint(); - let server = Builder::default().build(&endpoint); + let endpoint = &dummy_name(); + let server = Builder::default().build(endpoint.clone()); let (tx, _rx) = broadcast::channel::(16); let mut module = RpcModule::new(tx.clone()); @@ -1080,10 +1053,10 @@ mod tests { } reth_tracing::init_test_tracing(); - let endpoint = dummy_endpoint(); + let endpoint = &dummy_name(); let rpc_middleware = RpcServiceBuilder::new().layer_fn(ModifyRequestIf); - let server = Builder::default().set_rpc_middleware(rpc_middleware).build(&endpoint); + let server = Builder::default().set_rpc_middleware(rpc_middleware).build(endpoint.clone()); let mut module = RpcModule::new(()); let goodbye_msg = r#"{"jsonrpc":"2.0","id":1,"result":"goodbye"}"#; diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 00581a1a16f4..59ae5d4cfcdc 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -15,13 +15,21 @@ workspace = true # reth reth-primitives.workspace = true reth-rpc-types.workspace = true +reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true reth-network-peers.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } -serde_json.workspace = true serde = { workspace = true, features = ["derive"] } +[dev-dependencies] +serde_json.workspace = true + [features] -client = ["jsonrpsee/client", "jsonrpsee/async-client"] +client = [ + "jsonrpsee/client", + "jsonrpsee/async-client", + "reth-rpc-eth-api/client" +] +optimism = ["reth-rpc-eth-api/optimism"] \ No newline at end of file diff --git a/crates/rpc/rpc-api/src/admin.rs 
b/crates/rpc/rpc-api/src/admin.rs index 15904fee4989..66f8918a33cb 100644 --- a/crates/rpc/rpc-api/src/admin.rs +++ b/crates/rpc/rpc-api/src/admin.rs @@ -1,7 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_network_peers::AnyNode; -use reth_primitives::NodeRecord; -use reth_rpc_types::{admin::NodeInfo, PeerInfo}; +use reth_network_peers::{AnyNode, NodeRecord}; +use reth_rpc_types::admin::{NodeInfo, PeerInfo}; /// Admin namespace rpc interface that gives access to several non-standard RPC methods. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "admin"))] diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index ccee09cc2b44..580245b1014c 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -26,7 +26,7 @@ pub trait DebugApi { #[method(name = "getRawTransaction")] async fn raw_transaction(&self, hash: B256) -> RpcResult>; - /// Returns an array of EIP-2718 binary-encoded transactions for the given [BlockId]. + /// Returns an array of EIP-2718 binary-encoded transactions for the given [`BlockId`]. 
#[method(name = "getRawTransactions")] async fn raw_transactions(&self, block_id: BlockId) -> RpcResult>; diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index e858f62df0d5..986dd76b14ab 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -8,14 +8,13 @@ use reth_engine_primitives::EngineTypes; use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, B256, U256, U64}; use reth_rpc_types::{ engine::{ - ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, - ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, - PayloadStatus, TransitionConfiguration, + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadBodiesV2, + ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }, state::StateOverride, BlockOverrides, Filter, Log, RichBlock, SyncStatus, TransactionRequest, }; - // NOTE: We can't use associated types in the `EngineApi` trait because of jsonrpsee, so we use a // generic here. It would be nice if the rpc macro would understand which types need to have serde. // By default, if the trait has a generic, the rpc macro will add e.g. 
`Engine: DeserializeOwned` to @@ -144,6 +143,13 @@ pub trait EngineApi { block_hashes: Vec, ) -> RpcResult; + /// See also + #[method(name = "getPayloadBodiesByHashV2")] + async fn get_payload_bodies_by_hash_v2( + &self, + block_hashes: Vec, + ) -> RpcResult; + /// See also /// /// Returns the execution payload bodies by the range starting at `start`, containing `count` @@ -163,6 +169,16 @@ pub trait EngineApi { count: U64, ) -> RpcResult; + /// See also + /// + /// Similar to `getPayloadBodiesByRangeV1`, but returns [`ExecutionPayloadBodiesV2`] + #[method(name = "getPayloadBodiesByRangeV2")] + async fn get_payload_bodies_by_range_v2( + &self, + start: U64, + count: U64, + ) -> RpcResult; + /// See also /// /// Note: This method will be deprecated after the cancun hardfork: diff --git a/crates/rpc/rpc-api/src/eth.rs b/crates/rpc/rpc-api/src/eth.rs deleted file mode 100644 index 44b5df58a0a6..000000000000 --- a/crates/rpc/rpc-api/src/eth.rs +++ /dev/null @@ -1,310 +0,0 @@ -use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; -use reth_rpc_types::{ - serde_helpers::JsonStorageKey, state::StateOverride, AccessListWithGasUsed, - AnyTransactionReceipt, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, - FeeHistory, Header, Index, RichBlock, StateContext, SyncStatus, Transaction, - TransactionRequest, Work, -}; - -/// Eth rpc interface: -#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] -#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EthApi { - /// Returns the protocol version encoded as a string. - #[method(name = "protocolVersion")] - async fn protocol_version(&self) -> RpcResult; - - /// Returns an object with data about the sync status or false. - #[method(name = "syncing")] - fn syncing(&self) -> RpcResult; - - /// Returns the client coinbase address. 
- #[method(name = "coinbase")] - async fn author(&self) -> RpcResult
; - - /// Returns a list of addresses owned by client. - #[method(name = "accounts")] - fn accounts(&self) -> RpcResult>; - - /// Returns the number of most recent block. - #[method(name = "blockNumber")] - fn block_number(&self) -> RpcResult; - - /// Returns the chain ID of the current network. - #[method(name = "chainId")] - async fn chain_id(&self) -> RpcResult>; - - /// Returns information about a block by hash. - #[method(name = "getBlockByHash")] - async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult>; - - /// Returns information about a block by number. - #[method(name = "getBlockByNumber")] - async fn block_by_number( - &self, - number: BlockNumberOrTag, - full: bool, - ) -> RpcResult>; - - /// Returns the number of transactions in a block from a block matching the given block hash. - #[method(name = "getBlockTransactionCountByHash")] - async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult>; - - /// Returns the number of transactions in a block matching the given block number. - #[method(name = "getBlockTransactionCountByNumber")] - async fn block_transaction_count_by_number( - &self, - number: BlockNumberOrTag, - ) -> RpcResult>; - - /// Returns the number of uncles in a block from a block matching the given block hash. - #[method(name = "getUncleCountByBlockHash")] - async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult>; - - /// Returns the number of uncles in a block with given block number. - #[method(name = "getUncleCountByBlockNumber")] - async fn block_uncles_count_by_number( - &self, - number: BlockNumberOrTag, - ) -> RpcResult>; - - /// Returns all transaction receipts for a given block. - #[method(name = "getBlockReceipts")] - async fn block_receipts( - &self, - block_id: BlockId, - ) -> RpcResult>>; - - /// Returns an uncle block of the given block and index. 
- #[method(name = "getUncleByBlockHashAndIndex")] - async fn uncle_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> RpcResult>; - - /// Returns an uncle block of the given block and index. - #[method(name = "getUncleByBlockNumberAndIndex")] - async fn uncle_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> RpcResult>; - - /// Returns the EIP-2718 encoded transaction if it exists. - /// - /// If this is a EIP-4844 transaction that is in the pool it will include the sidecar. - #[method(name = "getRawTransactionByHash")] - async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult>; - - /// Returns the information about a transaction requested by transaction hash. - #[method(name = "getTransactionByHash")] - async fn transaction_by_hash(&self, hash: B256) -> RpcResult>; - - /// Returns information about a raw transaction by block hash and transaction index position. - #[method(name = "getRawTransactionByBlockHashAndIndex")] - async fn raw_transaction_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> RpcResult>; - - /// Returns information about a transaction by block hash and transaction index position. - #[method(name = "getTransactionByBlockHashAndIndex")] - async fn transaction_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> RpcResult>; - - /// Returns information about a raw transaction by block number and transaction index - /// position. - #[method(name = "getRawTransactionByBlockNumberAndIndex")] - async fn raw_transaction_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> RpcResult>; - - /// Returns information about a transaction by block number and transaction index position. 
- #[method(name = "getTransactionByBlockNumberAndIndex")] - async fn transaction_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> RpcResult>; - - /// Returns the receipt of a transaction by transaction hash. - #[method(name = "getTransactionReceipt")] - async fn transaction_receipt(&self, hash: B256) -> RpcResult>; - - /// Returns the balance of the account of given address. - #[method(name = "getBalance")] - async fn balance(&self, address: Address, block_number: Option) -> RpcResult; - - /// Returns the value from a storage position at a given address - #[method(name = "getStorageAt")] - async fn storage_at( - &self, - address: Address, - index: JsonStorageKey, - block_number: Option, - ) -> RpcResult; - - /// Returns the number of transactions sent from an address at given block number. - #[method(name = "getTransactionCount")] - async fn transaction_count( - &self, - address: Address, - block_number: Option, - ) -> RpcResult; - - /// Returns code at a given address at given block number. - #[method(name = "getCode")] - async fn get_code(&self, address: Address, block_number: Option) -> RpcResult; - - /// Returns the block's header at given number. - #[method(name = "getHeaderByNumber")] - async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult>; - - /// Returns the block's header at given hash. - #[method(name = "getHeaderByHash")] - async fn header_by_hash(&self, hash: B256) -> RpcResult>; - - /// Executes a new message call immediately without creating a transaction on the block chain. 
- #[method(name = "call")] - async fn call( - &self, - request: TransactionRequest, - block_number: Option, - state_overrides: Option, - block_overrides: Option>, - ) -> RpcResult; - - /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the - /// optionality of state overrides - #[method(name = "callMany")] - async fn call_many( - &self, - bundle: Bundle, - state_context: Option, - state_override: Option, - ) -> RpcResult>; - - /// Generates an access list for a transaction. - /// - /// This method creates an [EIP2930](https://eips.ethereum.org/EIPS/eip-2930) type accessList based on a given Transaction. - /// - /// An access list contains all storage slots and addresses touched by the transaction, except - /// for the sender account and the chain's precompiles. - /// - /// It returns list of addresses and storage keys used by the transaction, plus the gas - /// consumed when the access list is added. That is, it gives you the list of addresses and - /// storage keys that will be used by that transaction, plus the gas consumed if the access - /// list is included. Like eth_estimateGas, this is an estimation; the list could change - /// when the transaction is actually mined. Adding an accessList to your transaction does - /// not necessary result in lower gas usage compared to a transaction without an access - /// list. - #[method(name = "createAccessList")] - async fn create_access_list( - &self, - request: TransactionRequest, - block_number: Option, - ) -> RpcResult; - - /// Generates and returns an estimate of how much gas is necessary to allow the transaction to - /// complete. - #[method(name = "estimateGas")] - async fn estimate_gas( - &self, - request: TransactionRequest, - block_number: Option, - state_override: Option, - ) -> RpcResult; - - /// Returns the current price per gas in wei. 
- #[method(name = "gasPrice")] - async fn gas_price(&self) -> RpcResult; - - /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions. - #[method(name = "maxPriorityFeePerGas")] - async fn max_priority_fee_per_gas(&self) -> RpcResult; - - /// Introduced in EIP-4844, returns the current blob base fee in wei. - #[method(name = "blobBaseFee")] - async fn blob_base_fee(&self) -> RpcResult; - - /// Returns the Transaction fee history - /// - /// Introduced in EIP-1559 for getting information on the appropriate priority fee to use. - /// - /// Returns transaction base fee per gas and effective priority fee per gas for the - /// requested/supported block range. The returned Fee history for the returned block range - /// can be a subsection of the requested range if not all blocks are available. - #[method(name = "feeHistory")] - async fn fee_history( - &self, - block_count: U64, - newest_block: BlockNumberOrTag, - reward_percentiles: Option>, - ) -> RpcResult; - - /// Returns whether the client is actively mining new blocks. - #[method(name = "mining")] - async fn is_mining(&self) -> RpcResult; - - /// Returns the number of hashes per second that the node is mining with. - #[method(name = "hashrate")] - async fn hashrate(&self) -> RpcResult; - - /// Returns the hash of the current block, the seedHash, and the boundary condition to be met - /// (“target”) - #[method(name = "getWork")] - async fn get_work(&self) -> RpcResult; - - /// Used for submitting mining hashrate. - /// - /// Can be used for remote miners to submit their hash rate. - /// It accepts the miner hash rate and an identifier which must be unique between nodes. - /// Returns `true` if the block was successfully submitted, `false` otherwise. - #[method(name = "submitHashrate")] - async fn submit_hashrate(&self, hashrate: U256, id: B256) -> RpcResult; - - /// Used for submitting a proof-of-work solution. 
- #[method(name = "submitWork")] - async fn submit_work(&self, nonce: B64, pow_hash: B256, mix_digest: B256) -> RpcResult; - - /// Sends transaction; will block waiting for signer to return the - /// transaction hash. - #[method(name = "sendTransaction")] - async fn send_transaction(&self, request: TransactionRequest) -> RpcResult; - - /// Sends signed transaction, returning its hash. - #[method(name = "sendRawTransaction")] - async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult; - - /// Returns an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n" - /// + len(message) + message))). - #[method(name = "sign")] - async fn sign(&self, address: Address, message: Bytes) -> RpcResult; - - /// Signs a transaction that can be submitted to the network at a later time using with - /// `sendRawTransaction.` - #[method(name = "signTransaction")] - async fn sign_transaction(&self, transaction: TransactionRequest) -> RpcResult; - - /// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md). - #[method(name = "signTypedData")] - async fn sign_typed_data(&self, address: Address, data: serde_json::Value) -> RpcResult; - - /// Returns the account and storage values of the specified account including the Merkle-proof. - /// This call can be used to verify that the data you are pulling from is not tampered with. 
- #[method(name = "getProof")] - async fn get_proof( - &self, - address: Address, - keys: Vec, - block_number: Option, - ) -> RpcResult; -} diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 82af34a86d73..cb84f8388788 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -16,12 +16,8 @@ mod admin; mod anvil; -mod bundle; mod debug; mod engine; -mod eth; -mod eth_filter; -mod eth_pubsub; mod ganache; mod hardhat; mod mev; @@ -42,12 +38,8 @@ pub use servers::*; pub mod servers { pub use crate::{ admin::AdminApiServer, - bundle::{EthBundleApiServer, EthCallBundleApiServer}, debug::DebugApiServer, engine::{EngineApiServer, EngineEthApiServer}, - eth::EthApiServer, - eth_filter::EthFilterApiServer, - eth_pubsub::EthPubSubApiServer, mev::MevApiServer, net::NetApiServer, otterscan::OtterscanServer, @@ -58,6 +50,10 @@ pub mod servers { validation::BlockSubmissionValidationApiServer, web3::Web3ApiServer, }; + pub use reth_rpc_eth_api::{ + self as eth, EthApiServer, EthBundleApiServer, EthCallBundleApiServer, EthFilterApiServer, + EthPubSubApiServer, + }; } /// re-export of all client traits @@ -70,11 +66,8 @@ pub mod clients { pub use crate::{ admin::AdminApiClient, anvil::AnvilApiClient, - bundle::{EthBundleApiClient, EthCallBundleApiClient}, debug::DebugApiClient, engine::{EngineApiClient, EngineEthApiClient}, - eth::EthApiClient, - eth_filter::EthFilterApiClient, ganache::GanacheApiClient, hardhat::HardhatApiClient, mev::MevApiClient, @@ -86,4 +79,7 @@ pub mod clients { validation::BlockSubmissionValidationApiClient, web3::Web3ApiClient, }; + pub use reth_rpc_eth_api::{ + EthApiClient, EthBundleApiClient, EthCallBundleApiClient, EthFilterApiClient, + }; } diff --git a/crates/rpc/rpc-api/src/mev.rs b/crates/rpc/rpc-api/src/mev.rs index 008535276328..ebe6f5ee8708 100644 --- a/crates/rpc/rpc-api/src/mev.rs +++ b/crates/rpc/rpc-api/src/mev.rs @@ -1,5 +1,5 @@ use jsonrpsee::proc_macros::rpc; -use reth_rpc_types::{ +use 
reth_rpc_types::mev::{ SendBundleRequest, SendBundleResponse, SimBundleOverrides, SimBundleResponse, }; diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index 2156765bb206..a06fa1a4ddaf 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,17 +1,27 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, TxHash, B256}; +use reth_primitives::{Address, BlockId, Bytes, TxHash, B256}; use reth_rpc_types::{ trace::otterscan::{ BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry, TransactionsWithReceipts, }, - Transaction, + Header, }; /// Otterscan rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "ots"))] pub trait Otterscan { + /// Get the block header by block number, required by otterscan. + /// Otterscan currently requires this endpoint, used as: + /// + /// 1. check if the node is Erigon or not + /// 2. get block header instead of the full block + /// + /// Ref: + #[method(name = "getHeaderByNumber", aliases = ["erigon_getHeaderByNumber"])] + async fn get_header_by_number(&self, block_number: u64) -> RpcResult>; + /// Check if a certain address contains a deployed code. #[method(name = "hasCode")] async fn has_code(&self, address: Address, block_number: Option) -> RpcResult; @@ -33,15 +43,12 @@ pub trait Otterscan { /// Extract all variations of calls, contract creation and self-destructs and returns a call /// tree. #[method(name = "traceTransaction")] - async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult; + async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult>>; /// Tailor-made and expanded version of eth_getBlockByNumber for block details page in /// Otterscan. 
#[method(name = "getBlockDetails")] - async fn get_block_details( - &self, - block_number: BlockNumberOrTag, - ) -> RpcResult>; + async fn get_block_details(&self, block_number: u64) -> RpcResult>; /// Tailor-made and expanded version of eth_getBlockByHash for block details page in Otterscan. #[method(name = "getBlockDetailsByHash")] @@ -51,7 +58,7 @@ pub trait Otterscan { #[method(name = "getBlockTransactions")] async fn get_block_transactions( &self, - block_number: BlockNumberOrTag, + block_number: u64, page_number: usize, page_size: usize, ) -> RpcResult; @@ -61,7 +68,7 @@ pub trait Otterscan { async fn search_transactions_before( &self, address: Address, - block_number: BlockNumberOrTag, + block_number: u64, page_size: usize, ) -> RpcResult; @@ -70,7 +77,7 @@ pub trait Otterscan { async fn search_transactions_after( &self, address: Address, - block_number: BlockNumberOrTag, + block_number: u64, page_size: usize, ) -> RpcResult; @@ -80,7 +87,7 @@ pub trait Otterscan { &self, sender: Address, nonce: u64, - ) -> RpcResult>; + ) -> RpcResult>; /// Gets the transaction hash and the address who created a contract. 
#[method(name = "getContractCreator")] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index fa1aabae7118..d97b23b5125b 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -19,7 +19,9 @@ reth-node-core.workspace = true reth-provider.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true +reth-rpc-eth-api.workspace = true reth-rpc-layer.workspace = true +reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true @@ -43,8 +45,10 @@ thiserror.workspace = true tracing.workspace = true [dev-dependencies] +reth-chainspec.workspace = true reth-beacon-consensus.workspace = true reth-network-api.workspace = true +reth-network-peers.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 89e208429a70..d3a7f45509cc 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -7,8 +7,8 @@ use jsonrpsee::{ Methods, }; use reth_engine_primitives::EngineTypes; -use reth_rpc::EthSubscriptionIdProvider; use reth_rpc_api::servers::*; +use reth_rpc_eth_types::EthSubscriptionIdProvider; use reth_rpc_layer::{ secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, JwtAuthValidator, JwtSecret, @@ -68,7 +68,7 @@ impl AuthServerConfig { .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?; let handle = server.start(module.inner.clone()); - let mut ipc_handle: Option = None; + let mut ipc_handle: Option = None; if let Some(ipc_server_config) = ipc_server_config { let ipc_endpoint_str = ipc_endpoint @@ -241,7 +241,7 @@ pub struct AuthServerHandle { handle: jsonrpsee::server::ServerHandle, secret: JwtSecret, ipc_endpoint: 
Option, - ipc_handle: Option, + ipc_handle: Option, } // === impl AuthServerHandle === @@ -298,7 +298,7 @@ impl AuthServerHandle { pub async fn ipc_client(&self) -> Option { use reth_ipc::client::IpcClientBuilder; - if let Some(ipc_endpoint) = self.ipc_endpoint.clone() { + if let Some(ipc_endpoint) = &self.ipc_endpoint { return Some( IpcClientBuilder::default() .build(ipc_endpoint) @@ -310,7 +310,7 @@ impl AuthServerHandle { } /// Returns an ipc handle - pub fn ipc_handle(&self) -> Option { + pub fn ipc_handle(&self) -> Option { self.ipc_handle.clone() } diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 45cad81cd7f0..1f61f57919f2 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -4,7 +4,7 @@ use crate::{ }; use jsonrpsee::server::ServerBuilder; use reth_node_core::{args::RpcServerArgs, utils::get_or_create_jwt_secret_from_path}; -use reth_rpc::eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig}; +use reth_rpc_eth_types::{EthStateCacheConfig, GasPriceOracleConfig}; use reth_rpc_layer::{JwtError, JwtSecret}; use reth_rpc_server_types::RpcModuleSelection; use std::{net::SocketAddr, path::PathBuf}; @@ -91,9 +91,11 @@ impl RethRpcServerConfig for RpcServerArgs { .max_tracing_requests(self.rpc_max_tracing_requests) .max_blocks_per_filter(self.rpc_max_blocks_per_filter.unwrap_or_max()) .max_logs_per_response(self.rpc_max_logs_per_response.unwrap_or_max() as usize) + .eth_proof_window(self.rpc_eth_proof_window) .rpc_gas_cap(self.rpc_gas_cap) .state_cache(self.state_cache_config()) .gpo_config(self.gas_price_oracle_config()) + .proof_permits(self.rpc_proof_permits) } fn state_cache_config(&self) -> EthStateCacheConfig { @@ -216,7 +218,7 @@ impl RethRpcServerConfig for RpcServerArgs { mod tests { use clap::{Args, Parser}; use reth_node_core::args::RpcServerArgs; - use reth_rpc::eth::RPC_DEFAULT_GAS_CAP; + use reth_rpc_eth_types::RPC_DEFAULT_GAS_CAP; use 
reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 5d0d064247a9..b9b2d63ef331 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,30 +1,138 @@ -use reth_rpc::{ - eth::{ - cache::{EthStateCache, EthStateCacheConfig}, - gas_oracle::GasPriceOracleConfig, - EthFilterConfig, FeeHistoryCacheConfig, RPC_DEFAULT_GAS_CAP, - }, - EthApi, EthFilter, EthPubSub, +use std::{fmt::Debug, time::Duration}; + +use reth_evm::ConfigureEvm; +use reth_network_api::NetworkInfo; +use reth_provider::{ + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, + FullRpcProvider, StateProviderFactory, +}; +use reth_rpc::{eth::EthFilterConfig, EthApi, EthFilter, EthPubSub}; +use reth_rpc_eth_types::{ + cache::cache_new_blocks_task, fee_history::fee_history_cache_new_blocks_task, EthStateCache, + EthStateCacheConfig, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + GasPriceOracleConfig, RPC_DEFAULT_GAS_CAP, }; use reth_rpc_server_types::constants::{ - default_max_tracing_requests, DEFAULT_MAX_BLOCKS_PER_FILTER, DEFAULT_MAX_LOGS_PER_RESPONSE, + default_max_tracing_requests, DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKS_PER_FILTER, + DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_PROOF_PERMITS, }; -use reth_tasks::pool::BlockingTaskPool; +use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; +use reth_transaction_pool::TransactionPool; use serde::{Deserialize, Serialize}; -/// All handlers for the `eth` namespace +/// Default value for stale filter ttl +const DEFAULT_STALE_FILTER_TTL: Duration = Duration::from_secs(5 * 60); + +/// Alias for function that builds the core `eth` namespace API. +pub type EthApiBuilder = + Box) -> EthApi>; + +/// Handlers for core, filter and pubsub `eth` namespace APIs. 
#[derive(Debug, Clone)] -pub struct EthHandlers { +pub struct EthHandlers { /// Main `eth_` request handler - pub api: EthApi, + pub api: EthApi, /// The async caching layer used by the eth handlers pub cache: EthStateCache, /// Polling based filter handler available on all transports pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) pub pubsub: EthPubSub, - /// The configured tracing call pool - pub blocking_task_pool: BlockingTaskPool, +} + +impl EthHandlers { + /// Returns a new [`EthHandlers`] builder. + #[allow(clippy::too_many_arguments)] + pub fn builder( + provider: Provider, + pool: Pool, + network: Network, + evm_config: EvmConfig, + config: EthConfig, + executor: Tasks, + events: Events, + eth_api_builder: EthApiB, + ) -> EthHandlersBuilder + where + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + { + EthHandlersBuilder { + provider, + pool, + network, + evm_config, + config, + executor, + events, + eth_api_builder: Box::new(eth_api_builder), + } + } +} + +/// Builds [`EthHandlers`] for core, filter, and pubsub `eth_` apis. +#[allow(missing_debug_implementations)] +pub struct EthHandlersBuilder { + provider: Provider, + pool: Pool, + network: Network, + evm_config: EvmConfig, + config: EthConfig, + executor: Tasks, + events: Events, + eth_api_builder: EthApiBuilder, +} + +impl + EthHandlersBuilder +where + Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Pool: Send + Sync + Clone + 'static, + EvmConfig: ConfigureEvm, + Network: Clone, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions + Clone, +{ + /// Returns a new instance with handlers for `eth` namespace. 
+ pub fn build(self) -> EthHandlers { + let Self { provider, pool, network, evm_config, config, executor, events, eth_api_builder } = + self; + + let cache = EthStateCache::spawn_with( + provider.clone(), + config.cache, + executor.clone(), + evm_config.clone(), + ); + + let new_canonical_blocks = events.canonical_state_stream(); + let c = cache.clone(); + executor.spawn_critical( + "cache canonical blocks task", + Box::pin(async move { + cache_new_blocks_task(c, new_canonical_blocks).await; + }), + ); + + let ctx = EthApiBuilderCtx { + provider, + pool, + network, + evm_config, + config, + executor, + events, + cache, + }; + + let api = eth_api_builder(&ctx); + + let filter = EthFilterApiBuilder::build(&ctx); + + let pubsub = EthPubSubApiBuilder::build(&ctx); + + EthHandlers { api, cache: ctx.cache, filter, pubsub } + } } /// Additional config values for the eth namespace. @@ -34,6 +142,8 @@ pub struct EthConfig { pub cache: EthStateCacheConfig, /// Settings for the gas price oracle pub gas_oracle: GasPriceOracleConfig, + /// The maximum number of blocks into the past for generating state proofs. + pub eth_proof_window: u64, /// The maximum number of tracing calls that can be executed in concurrently. pub max_tracing_requests: usize, /// Maximum number of blocks that could be scanned per filter request in `eth_getLogs` calls. @@ -46,9 +156,11 @@ pub struct EthConfig { pub rpc_gas_cap: u64, /// /// Sets TTL for stale filters - pub stale_filter_ttl: std::time::Duration, + pub stale_filter_ttl: Duration, /// Settings for the fee history cache pub fee_history_cache: FeeHistoryCacheConfig, + /// The maximum number of getproof calls that can be executed concurrently. 
+ pub proof_permits: usize, } impl EthConfig { @@ -61,20 +173,19 @@ impl EthConfig { } } -/// Default value for stale filter ttl -const DEFAULT_STALE_FILTER_TTL: std::time::Duration = std::time::Duration::from_secs(5 * 60); - impl Default for EthConfig { fn default() -> Self { Self { cache: EthStateCacheConfig::default(), gas_oracle: GasPriceOracleConfig::default(), + eth_proof_window: DEFAULT_ETH_PROOF_WINDOW, max_tracing_requests: default_max_tracing_requests(), max_blocks_per_filter: DEFAULT_MAX_BLOCKS_PER_FILTER, max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE, rpc_gas_cap: RPC_DEFAULT_GAS_CAP.into(), stale_filter_ttl: DEFAULT_STALE_FILTER_TTL, fee_history_cache: FeeHistoryCacheConfig::default(), + proof_permits: DEFAULT_PROOF_PERMITS, } } } @@ -115,4 +226,172 @@ impl EthConfig { self.rpc_gas_cap = rpc_gas_cap; self } + + /// Configures the maximum proof window for historical proof generation. + pub const fn eth_proof_window(mut self, window: u64) -> Self { + self.eth_proof_window = window; + self + } + + /// Configures the number of getproof requests + pub const fn proof_permits(mut self, permits: usize) -> Self { + self.proof_permits = permits; + self + } +} + +/// Context for building the `eth` namespace API. +#[derive(Debug, Clone)] +pub struct EthApiBuilderCtx { + /// Database handle. + pub provider: Provider, + /// Mempool handle. + pub pool: Pool, + /// Network handle. + pub network: Network, + /// EVM configuration. + pub evm_config: EvmConfig, + /// RPC config for `eth` namespace. + pub config: EthConfig, + /// Runtime handle. + pub executor: Tasks, + /// Events handle. + pub events: Events, + /// RPC cache handle. + pub cache: EthStateCache, +} + +/// Ethereum layer one `eth` RPC server builder. +#[derive(Default, Debug, Clone, Copy)] +pub struct EthApiBuild; + +impl EthApiBuild { + /// Builds the [`EthApiServer`](reth_rpc_eth_api::EthApiServer), for given context. 
+ pub fn build( + ctx: &EthApiBuilderCtx, + ) -> EthApi + where + Provider: FullRpcProvider, + Pool: TransactionPool, + Network: NetworkInfo + Clone, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions, + EvmConfig: ConfigureEvm, + { + let gas_oracle = GasPriceOracleBuilder::build(ctx); + let fee_history_cache = FeeHistoryCacheBuilder::build(ctx); + + EthApi::with_spawner( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.network.clone(), + ctx.cache.clone(), + gas_oracle, + ctx.config.rpc_gas_cap, + ctx.config.eth_proof_window, + Box::new(ctx.executor.clone()), + BlockingTaskPool::build().expect("failed to build blocking task pool"), + fee_history_cache, + ctx.evm_config.clone(), + None, + ctx.config.proof_permits, + ) + } +} + +/// Builds the `eth_` namespace API [`EthFilterApiServer`](reth_rpc_eth_api::EthFilterApiServer). +#[derive(Debug)] +pub struct EthFilterApiBuilder; + +impl EthFilterApiBuilder { + /// Builds the [`EthFilterApiServer`](reth_rpc_eth_api::EthFilterApiServer), for given context. + pub fn build( + ctx: &EthApiBuilderCtx, + ) -> EthFilter + where + Provider: Send + Sync + Clone + 'static, + Pool: Send + Sync + Clone + 'static, + Tasks: TaskSpawner + Clone + 'static, + { + EthFilter::new( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.cache.clone(), + ctx.config.filter_config(), + Box::new(ctx.executor.clone()), + ) + } +} + +/// Builds the `eth_` namespace API [`EthPubSubApiServer`](reth_rpc_eth_api::EthFilterApiServer). +#[derive(Debug)] +pub struct EthPubSubApiBuilder; + +impl EthPubSubApiBuilder { + /// Builds the [`EthPubSubApiServer`](reth_rpc_eth_api::EthPubSubApiServer), for given context. 
+ pub fn build( + ctx: &EthApiBuilderCtx, + ) -> EthPubSub + where + Provider: Clone, + Pool: Clone, + Events: Clone, + Network: Clone, + Tasks: TaskSpawner + Clone + 'static, + { + EthPubSub::with_spawner( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.events.clone(), + ctx.network.clone(), + Box::new(ctx.executor.clone()), + ) + } +} + +/// Builds `eth_` core api component [`GasPriceOracle`], for given context. +#[derive(Debug)] +pub struct GasPriceOracleBuilder; + +impl GasPriceOracleBuilder { + /// Builds a [`GasPriceOracle`], for given context. + pub fn build( + ctx: &EthApiBuilderCtx, + ) -> GasPriceOracle + where + Provider: BlockReaderIdExt + Clone, + { + GasPriceOracle::new(ctx.provider.clone(), ctx.config.gas_oracle, ctx.cache.clone()) + } +} + +/// Builds `eth_` core api component [`FeeHistoryCache`], for given context. +#[derive(Debug)] +pub struct FeeHistoryCacheBuilder; + +impl FeeHistoryCacheBuilder { + /// Builds a [`FeeHistoryCache`], for given context. + pub fn build( + ctx: &EthApiBuilderCtx, + ) -> FeeHistoryCache + where + Provider: ChainSpecProvider + BlockReaderIdExt + Clone + 'static, + Tasks: TaskSpawner, + Events: CanonStateSubscriptions, + { + let fee_history_cache = + FeeHistoryCache::new(ctx.cache.clone(), ctx.config.fee_history_cache); + + let new_canonical_blocks = ctx.events.canonical_state_stream(); + let fhc = fee_history_cache.clone(); + let provider = ctx.provider.clone(); + ctx.executor.spawn_critical( + "cache canonical blocks for fee history task", + Box::pin(async move { + fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider).await; + }), + ); + + fee_history_cache + } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 2320d5e52e4f..97c83dec3a00 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -8,9 +8,8 @@ //! transaction pool. [`RpcModuleBuilder::build`] returns a [`TransportRpcModules`] which contains //! 
the transport specific config (what APIs are available via this transport). //! -//! The [`RpcServerConfig`] is used to configure the [`RpcServer`] type which contains all transport -//! implementations (http server, ws server, ipc server). [`RpcServer::start`] requires the -//! [`TransportRpcModules`] so it can start the servers with the configured modules. +//! The [`RpcServerConfig`] is used to assemble and start the http server, ws server, ipc servers, +//! it requires the [`TransportRpcModules`] so it can start the servers with the configured modules. //! //! # Examples //! @@ -19,13 +18,12 @@ //! ``` //! use reth_evm::ConfigureEvm; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_provider::{ -//! AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, -//! ChangeSetReader, EvmEnvProvider, StateProviderFactory, -//! }; +//! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc_builder::{ -//! RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig, +//! EthApiBuild, RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, +//! TransportRpcModuleConfig, //! }; +//! //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! pub async fn launch( @@ -35,19 +33,11 @@ //! events: Events, //! evm_config: EvmConfig, //! ) where -//! Provider: AccountReader -//! + BlockReaderIdExt -//! + ChainSpecProvider -//! + ChangeSetReader -//! + StateProviderFactory -//! + EvmEnvProvider -//! + Clone -//! + Unpin -//! + 'static, -//! Pool: TransactionPool + Clone + 'static, +//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Pool: TransactionPool + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, -//! EvmConfig: ConfigureEvm + 'static, +//! EvmConfig: ConfigureEvm, //! { //! // configure the rpc module per transport //! 
let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -64,12 +54,11 @@ //! events, //! evm_config, //! ) -//! .build(transports); +//! .build(transports, EthApiBuild::build); //! let handle = RpcServerConfig::default() //! .with_http(ServerBuilder::default()) -//! .start(transport_modules) -//! .await -//! .unwrap(); +//! .start(&transport_modules) +//! .await; //! } //! ``` //! @@ -80,13 +69,10 @@ //! use reth_engine_primitives::EngineTypes; //! use reth_evm::ConfigureEvm; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_provider::{ -//! AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, -//! ChangeSetReader, EvmEnvProvider, StateProviderFactory, -//! }; +//! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc_api::EngineApiServer; //! use reth_rpc_builder::{ -//! auth::AuthServerConfig, RethRpcModule, RpcModuleBuilder, RpcServerConfig, +//! auth::AuthServerConfig, EthApiBuild, RethRpcModule, RpcModuleBuilder, RpcServerConfig, //! TransportRpcModuleConfig, //! }; //! use reth_rpc_layer::JwtSecret; @@ -101,21 +87,13 @@ //! engine_api: EngineApi, //! evm_config: EvmConfig, //! ) where -//! Provider: AccountReader -//! + BlockReaderIdExt -//! + ChainSpecProvider -//! + ChangeSetReader -//! + StateProviderFactory -//! + EvmEnvProvider -//! + Clone -//! + Unpin -//! + 'static, -//! Pool: TransactionPool + Clone + 'static, +//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Pool: TransactionPool + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes + 'static, -//! EvmConfig: ConfigureEvm + 'static, +//! EvmConfig: ConfigureEvm, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -135,15 +113,14 @@ //! //! // configure the server modules //! 
let (modules, auth_module, _registry) = -//! builder.build_with_auth_server(transports, engine_api); +//! builder.build_with_auth_server(transports, engine_api, EthApiBuild::build); //! //! // start the servers //! let auth_config = AuthServerConfig::builder(JwtSecret::random()).build(); //! let config = RpcServerConfig::default(); //! //! let (_rpc_handle, _auth_handle) = -//! try_join!(modules.start_server(config), auth_module.start_server(auth_config),) -//! .unwrap(); +//! try_join!(config.start(&modules), auth_module.start_server(auth_config),).unwrap(); //! } //! ``` @@ -155,53 +132,49 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use crate::{ - auth::AuthRpcModule, cors::CorsDomainError, error::WsHttpSamePortError, - metrics::RpcRequestMetrics, +use std::{ + collections::HashMap, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, }; + use error::{ConflictingModules, RpcError, ServerKind}; use http::{header::AUTHORIZATION, HeaderMap}; use jsonrpsee::{ core::RegisterMethodError, - server::{AlreadyStoppedError, IdProvider, RpcServiceBuilder, Server, ServerHandle}, + server::{AlreadyStoppedError, IdProvider, RpcServiceBuilder, ServerHandle}, Methods, RpcModule, }; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; -use reth_ipc::server::IpcServer; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_provider::{ - AccountReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, - ChangeSetReader, EvmEnvProvider, StateProviderFactory, + AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + EvmEnvProvider, FullRpcProvider, StateProviderFactory, }; use reth_rpc::{ - eth::{ - cache::{cache_new_blocks_task, EthStateCache}, - fee_history_cache_new_blocks_task, - gas_oracle::GasPriceOracle, - traits::RawTransactionForwarder, - EthBundle, FeeHistoryCache, - 
}, - AdminApi, DebugApi, EngineEthApi, EthApi, EthFilter, EthPubSub, EthSubscriptionIdProvider, - NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, Web3Api, + AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, + TxPoolApi, Web3Api, }; use reth_rpc_api::servers::*; -use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; -use reth_tasks::{ - pool::{BlockingTaskGuard, BlockingTaskPool}, - TaskSpawner, TokioTaskExecutor, +use reth_rpc_eth_api::{ + helpers::{ + Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt, UpdateRawTxForwarder, + }, + EthApiServer, FullEthApiServer, RawTransactionForwarder, }; +use reth_rpc_eth_types::{EthStateCache, EthSubscriptionIdProvider}; +use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; +use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; use serde::{Deserialize, Serialize}; -use std::{ - collections::HashMap, - fmt, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - sync::Arc, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; use tower_http::cors::CorsLayer; -use tracing::{instrument, trace}; + +use crate::{ + auth::AuthRpcModule, cors::CorsDomainError, error::WsHttpSamePortError, + metrics::RpcRequestMetrics, +}; // re-export for convenience pub use jsonrpsee::server::ServerBuilder; @@ -224,15 +197,18 @@ mod cors; pub mod error; /// Eth utils -mod eth; -pub use eth::{EthConfig, EthHandlers}; +pub mod eth; +pub use eth::{ + EthApiBuild, EthApiBuilderCtx, EthConfig, EthHandlers, FeeHistoryCacheBuilder, + GasPriceOracleBuilder, +}; // Rpc server metrics mod metrics; /// Convenience function for starting a server in one step. 
#[allow(clippy::too_many_arguments)] -pub async fn launch( +pub async fn launch( provider: Provider, pool: Pool, network: Network, @@ -241,28 +217,26 @@ pub async fn launch( executor: Tasks, events: Events, evm_config: EvmConfig, + eth: EthApiB, ) -> Result where - Provider: BlockReaderIdExt - + AccountReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + EvmConfig: ConfigureEvm, + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + EthApi: FullEthApiServer, { let module_config = module_config.into(); - let server_config = server_config.into(); - RpcModuleBuilder::new(provider, pool, network, executor, events, evm_config) - .build(module_config) - .start_server(server_config) + server_config + .into() + .start( + &RpcModuleBuilder::new(provider, pool, network, executor, events, evm_config) + .build(module_config, eth), + ) .await } @@ -329,8 +303,8 @@ impl /// Configure a [`NoopTransactionPool`] instance. /// /// Caution: This will configure a pool API that does absolutely nothing. - /// This is only intended for allow easier setup of namespaces that depend on the [`EthApi`] - /// which requires a [`TransactionPool`] implementation. + /// This is only intended for allow easier setup of namespaces that depend on the + /// [`EthApi`](reth_rpc::eth::EthApi) which requires a [`TransactionPool`] implementation. pub fn with_noop_pool( self, ) -> RpcModuleBuilder { @@ -360,8 +334,8 @@ impl /// Configure a [`NoopNetwork`] instance. /// /// Caution: This will configure a network API that does absolutely nothing. 
- /// This is only intended for allow easier setup of namespaces that depend on the [`EthApi`] - /// which requires a [`NetworkInfo`] implementation. + /// This is only intended for allow easier setup of namespaces that depend on the + /// [`EthApi`](reth_rpc::eth::EthApi) which requires a [`NetworkInfo`] implementation. pub fn with_noop_network( self, ) -> RpcModuleBuilder { @@ -434,20 +408,12 @@ impl impl RpcModuleBuilder where - Provider: BlockReaderIdExt - + AccountReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + EvmConfig: ConfigureEvm, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -455,25 +421,31 @@ where /// This behaves exactly as [`RpcModuleBuilder::build`] for the [`TransportRpcModules`], but /// also configures the auth (engine api) server, which exposes a subset of the `eth_` /// namespace. 
- pub fn build_with_auth_server( + #[allow(clippy::type_complexity)] + pub fn build_with_auth_server( self, module_config: TransportRpcModuleConfig, engine: EngineApi, + eth: EthApiB, ) -> ( TransportRpcModules, AuthRpcModule, - RethModuleRegistry, + RpcRegistryInner, ) where EngineT: EngineTypes + 'static, EngineApi: EngineApiServer, + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + EthApi: FullEthApiServer, { let Self { provider, pool, network, executor, events, evm_config } = self; let config = module_config.config.clone().unwrap_or_default(); - let mut registry = - RethModuleRegistry::new(provider, pool, network, executor, events, config, evm_config); + let mut registry = RpcRegistryInner::new( + provider, pool, network, executor, events, config, evm_config, eth, + ); let modules = registry.create_transport_rpc_modules(module_config); @@ -482,7 +454,7 @@ where (modules, auth_module, registry) } - /// Converts the builder into a [`RethModuleRegistry`] which can be used to create all + /// Converts the builder into a [`RpcRegistryInner`] which can be used to create all /// components. 
/// /// This is useful for getting access to API handlers directly: @@ -493,7 +465,7 @@ where /// use reth_evm::ConfigureEvm; /// use reth_network_api::noop::NoopNetwork; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; - /// use reth_rpc_builder::RpcModuleBuilder; + /// use reth_rpc_builder::{EthApiBuild, RpcModuleBuilder}; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::noop::NoopTransactionPool; /// @@ -505,24 +477,36 @@ where /// .with_executor(TokioTaskExecutor::default()) /// .with_events(TestCanonStateSubscriptions::default()) /// .with_evm_config(evm) - /// .into_registry(Default::default()); + /// .into_registry(Default::default(), EthApiBuild::build); /// /// let eth_api = registry.eth_api(); /// } /// ``` - pub fn into_registry( + pub fn into_registry( self, config: RpcModuleConfig, - ) -> RethModuleRegistry { + eth: EthApiB, + ) -> RpcRegistryInner + where + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + { let Self { provider, pool, network, executor, events, evm_config } = self; - RethModuleRegistry::new(provider, pool, network, executor, events, config, evm_config) + RpcRegistryInner::new(provider, pool, network, executor, events, config, evm_config, eth) } /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). 
- /// - /// See also [`RpcServer::start`] - pub fn build(self, module_config: TransportRpcModuleConfig) -> TransportRpcModules<()> { + pub fn build( + self, + module_config: TransportRpcModuleConfig, + eth: EthApiB, + ) -> TransportRpcModules<()> + where + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + EthApi: FullEthApiServer, + { let mut modules = TransportRpcModules::default(); let Self { provider, pool, network, executor, events, evm_config } = self; @@ -530,7 +514,7 @@ where if !module_config.is_empty() { let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone(); - let mut registry = RethModuleRegistry::new( + let mut registry = RpcRegistryInner::new( provider, pool, network, @@ -538,6 +522,7 @@ where events, config.unwrap_or_default(), evm_config, + eth, ); modules.config = module_config; @@ -626,34 +611,34 @@ impl RpcModuleConfigBuilder { /// A Helper type the holds instances of the configured modules. #[derive(Debug, Clone)] -pub struct RethModuleRegistry { +pub struct RpcRegistryInner { provider: Provider, pool: Pool, network: Network, executor: Tasks, events: Events, - /// Defines how to configure the EVM before execution. - evm_config: EvmConfig, - /// Additional settings for handlers. 
- config: RpcModuleConfig, - /// Holds a clone of all the eth namespace handlers - eth: Option>, + /// Holds a all `eth_` namespace handlers + eth: EthHandlers, /// to put trace calls behind semaphore blocking_pool_guard: BlockingTaskGuard, /// Contains the [Methods] of a module modules: HashMap, - /// Optional forwarder for `eth_sendRawTransaction` - // TODO(mattsse): find a more ergonomic way to configure eth/rpc customizations - eth_raw_transaction_forwarder: Option>, } -// === impl RethModuleRegistry === +// === impl RpcRegistryInner === -impl - RethModuleRegistry +impl + RpcRegistryInner +where + Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Pool: Send + Sync + Clone + 'static, + Network: Clone, + Events: CanonStateSubscriptions + Clone, + Tasks: TaskSpawner + Clone + 'static, { /// Creates a new, empty instance. - pub fn new( + #[allow(clippy::too_many_arguments)] + pub fn new( provider: Provider, pool: Pool, network: Network, @@ -661,34 +646,59 @@ impl events: Events, config: RpcModuleConfig, evm_config: EvmConfig, - ) -> Self { + eth_api_builder: EthApiB, + ) -> Self + where + EvmConfig: ConfigureEvm, + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + { + let blocking_pool_guard = BlockingTaskGuard::new(config.eth.max_tracing_requests); + + let eth = EthHandlers::builder( + provider.clone(), + pool.clone(), + network.clone(), + evm_config, + config.eth, + executor.clone(), + events.clone(), + eth_api_builder, + ) + .build(); + Self { provider, pool, network, - evm_config, - eth: None, + eth, executor, modules: Default::default(), - blocking_pool_guard: BlockingTaskGuard::new(config.eth.max_tracing_requests), - config, + blocking_pool_guard, events, - eth_raw_transaction_forwarder: None, } } +} - /// Sets a forwarder for `eth_sendRawTransaction` +impl + RpcRegistryInner +{ + /// Returns a reference to the installed [`EthApi`](reth_rpc::eth::EthApi). 
+ pub const fn eth_api(&self) -> &EthApi { + &self.eth.api + } + + /// Returns a reference to the installed [`EthHandlers`]. + pub const fn eth_handlers(&self) -> &EthHandlers { + &self.eth + } + + /// Returns the [`EthStateCache`] frontend /// - /// Note: this might be removed in the future in favor of a more generic approach. - pub fn set_eth_raw_transaction_forwarder( - &mut self, - forwarder: Arc, - ) { - if let Some(eth) = self.eth.as_ref() { - // in case the eth api has been created before the forwarder was set: - eth.api.set_eth_raw_transaction_forwarder(forwarder.clone()); - } - self.eth_raw_transaction_forwarder = Some(forwarder); + /// This will spawn exactly one [`EthStateCache`] service if this is the first time the cache is + /// requested. + pub const fn eth_cache(&self) -> &EthStateCache { + &self.eth.cache } /// Returns a reference to the pool @@ -726,13 +736,30 @@ impl } } -impl - RethModuleRegistry +impl + RpcRegistryInner where - Network: NetworkInfo + Peers + Clone + 'static, + EthApi: UpdateRawTxForwarder, +{ + /// Sets a forwarder for `eth_sendRawTransaction` + /// + /// Note: this might be removed in the future in favor of a more generic approach. 
+ pub fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc) { + // in case the eth api has been created before the forwarder was set: + self.eth.api.set_eth_raw_transaction_forwarder(forwarder.clone()); + } +} + +impl + RpcRegistryInner +where + Network: NetworkInfo + Clone + 'static, { /// Instantiates `AdminApi` - pub fn admin_api(&self) -> AdminApi { + pub fn admin_api(&self) -> AdminApi + where + Network: Peers, + { AdminApi::new(self.network.clone(), self.provider.chain_spec()) } @@ -742,7 +769,10 @@ where } /// Register Admin Namespace - pub fn register_admin(&mut self) -> &mut Self { + pub fn register_admin(&mut self) -> &mut Self + where + Network: Peers, + { let adminapi = self.admin_api(); self.modules.insert(RethRpcModule::Admin, adminapi.into_rpc().into()); self @@ -756,31 +786,24 @@ where } } -impl - RethModuleRegistry +impl + RpcRegistryInner where - Provider: BlockReaderIdExt - + AccountReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + EthApi: Clone, { /// Register Eth Namespace /// /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn register_eth(&mut self) -> &mut Self { - let eth_api = self.eth_api(); + pub fn register_eth(&mut self) -> &mut Self + where + EthApi: EthApiServer, + { + let eth_api = self.eth_api().clone(); self.modules.insert(RethRpcModule::Eth, eth_api.into_rpc().into()); self } @@ -790,7 +813,10 @@ where /// # Panics /// /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] - pub fn register_ots(&mut self) -> &mut Self { + pub fn register_ots(&mut self) -> &mut Self + where + EthApi: EthApiServer + TraceExt, + { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); self @@ -801,7 +827,10 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn register_debug(&mut self) -> &mut Self { + pub fn register_debug(&mut self) -> &mut Self + where + EthApi: EthApiSpec + EthTransactions + TraceExt, + { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); self @@ -812,34 +841,15 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn register_trace(&mut self) -> &mut Self { + pub fn register_trace(&mut self) -> &mut Self + where + EthApi: TraceExt, + { let trace_api = self.trace_api(); self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into()); self } - /// Configures the auth module that includes the - /// * `engine_` namespace - /// * `api_` namespace - /// - /// Note: This does _not_ register the `engine_` in this registry. - pub fn create_auth_module(&mut self, engine_api: EngineApi) -> AuthRpcModule - where - EngineT: EngineTypes + 'static, - EngineApi: EngineApiServer, - { - let eth_handlers = self.eth_handlers(); - let mut module = RpcModule::new(()); - - module.merge(engine_api.into_rpc()).expect("No conflicting methods"); - - // also merge a subset of `eth_` handlers - let engine_eth = EngineEthApi::new(eth_handlers.api.clone(), eth_handlers.filter); - module.merge(engine_eth.into_rpc()).expect("No conflicting methods"); - - AuthRpcModule { inner: module } - } - /// Register Net Namespace /// /// See also [`Self::eth_api`] @@ -847,7 +857,10 @@ where /// # Panics /// /// If called outside of the tokio runtime. 
- pub fn register_net(&mut self) -> &mut Self { + pub fn register_net(&mut self) -> &mut Self + where + EthApi: EthApiSpec + 'static, + { let netapi = self.net_api(); self.modules.insert(RethRpcModule::Net, netapi.into_rpc().into()); self @@ -866,6 +879,113 @@ where self } + /// Instantiates `TraceApi` + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn trace_api(&self) -> TraceApi + where + EthApi: TraceExt, + { + TraceApi::new( + self.provider.clone(), + self.eth_api().clone(), + self.blocking_pool_guard.clone(), + ) + } + + /// Instantiates [`EthBundle`] Api + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn bundle_api(&self) -> EthBundle + where + EthApi: EthTransactions + LoadPendingBlock + Call, + { + let eth_api = self.eth_api().clone(); + EthBundle::new(eth_api, self.blocking_pool_guard.clone()) + } + + /// Instantiates `OtterscanApi` + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn otterscan_api(&self) -> OtterscanApi + where + EthApi: EthApiServer, + { + let eth_api = self.eth_api().clone(); + OtterscanApi::new(eth_api) + } + + /// Instantiates `DebugApi` + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn debug_api(&self) -> DebugApi + where + EthApi: EthApiSpec + EthTransactions + TraceExt, + { + let eth_api = self.eth_api().clone(); + DebugApi::new(self.provider.clone(), eth_api, self.blocking_pool_guard.clone()) + } + + /// Instantiates `NetApi` + /// + /// # Panics + /// + /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] + pub fn net_api(&self) -> NetApi + where + EthApi: EthApiSpec + 'static, + { + let eth_api = self.eth_api().clone(); + NetApi::new(self.network.clone(), eth_api) + } + + /// Instantiates `RethApi` + pub fn reth_api(&self) -> RethApi { + RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) + } +} + +impl + RpcRegistryInner +where + Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Pool: TransactionPool + 'static, + Network: NetworkInfo + Peers + Clone + 'static, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, + EthApi: FullEthApiServer, +{ + /// Configures the auth module that includes the + /// * `engine_` namespace + /// * `api_` namespace + /// + /// Note: This does _not_ register the `engine_` in this registry. + pub fn create_auth_module(&self, engine_api: EngineApi) -> AuthRpcModule + where + EngineT: EngineTypes + 'static, + EngineApi: EngineApiServer, + { + let mut module = RpcModule::new(()); + + module.merge(engine_api.into_rpc()).expect("No conflicting methods"); + + // also merge a subset of `eth_` handlers + let eth_handlers = self.eth_handlers(); + let engine_eth = EngineEthApi::new(eth_handlers.api.clone(), eth_handlers.filter.clone()); + + module.merge(engine_eth.into_rpc()).expect("No conflicting methods"); + + AuthRpcModule { inner: module } + } + /// Helper function to create a [`RpcModule`] if it's not `None` fn maybe_module(&mut self, config: Option<&RpcModuleSelection>) -> Option> { config.map(|config| self.module_for(config)) @@ -913,13 +1033,8 @@ where &mut self, namespaces: impl Iterator, ) -> Vec { - let EthHandlers { - api: eth_api, - filter: eth_filter, - pubsub: eth_pubsub, - cache: _, - blocking_task_pool: _, - } = self.with_eth(|eth| eth.clone()); + let EthHandlers { api: eth_api, filter: eth_filter, pubsub: eth_pubsub, .. 
} = + self.eth_handlers().clone(); // Create a copy, so we can list out all the methods for rpc_ api let namespaces: Vec<_> = namespaces.collect(); @@ -988,172 +1103,6 @@ where }) .collect::>() } - - /// Returns the [`EthStateCache`] frontend - /// - /// This will spawn exactly one [`EthStateCache`] service if this is the first time the cache is - /// requested. - pub fn eth_cache(&mut self) -> EthStateCache { - self.with_eth(|handlers| handlers.cache.clone()) - } - - /// Creates the [`EthHandlers`] type the first time this is called. - /// - /// This will spawn the required service tasks for [`EthApi`] for: - /// - [`EthStateCache`] - /// - [`FeeHistoryCache`] - fn with_eth(&mut self, f: F) -> R - where - F: FnOnce(&EthHandlers) -> R, - { - f(match &self.eth { - Some(eth) => eth, - None => self.eth.insert(self.init_eth()), - }) - } - - fn init_eth(&self) -> EthHandlers { - let cache = EthStateCache::spawn_with( - self.provider.clone(), - self.config.eth.cache.clone(), - self.executor.clone(), - self.evm_config.clone(), - ); - let gas_oracle = GasPriceOracle::new( - self.provider.clone(), - self.config.eth.gas_oracle.clone(), - cache.clone(), - ); - let new_canonical_blocks = self.events.canonical_state_stream(); - let c = cache.clone(); - - self.executor.spawn_critical( - "cache canonical blocks task", - Box::pin(async move { - cache_new_blocks_task(c, new_canonical_blocks).await; - }), - ); - - let fee_history_cache = - FeeHistoryCache::new(cache.clone(), self.config.eth.fee_history_cache.clone()); - let new_canonical_blocks = self.events.canonical_state_stream(); - let fhc = fee_history_cache.clone(); - let provider_clone = self.provider.clone(); - self.executor.spawn_critical( - "cache canonical blocks for fee history task", - Box::pin(async move { - fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider_clone).await; - }), - ); - - let executor = Box::new(self.executor.clone()); - let blocking_task_pool = BlockingTaskPool::build().expect("failed 
to build tracing pool"); - let api = EthApi::with_spawner( - self.provider.clone(), - self.pool.clone(), - self.network.clone(), - cache.clone(), - gas_oracle, - self.config.eth.rpc_gas_cap, - executor.clone(), - blocking_task_pool.clone(), - fee_history_cache, - self.evm_config.clone(), - self.eth_raw_transaction_forwarder.clone(), - ); - let filter = EthFilter::new( - self.provider.clone(), - self.pool.clone(), - cache.clone(), - self.config.eth.filter_config(), - executor.clone(), - ); - - let pubsub = EthPubSub::with_spawner( - self.provider.clone(), - self.pool.clone(), - self.events.clone(), - self.network.clone(), - executor, - ); - - EthHandlers { api, cache, filter, pubsub, blocking_task_pool } - } - - /// Returns the configured [`EthHandlers`] or creates it if it does not exist yet - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn eth_handlers(&mut self) -> EthHandlers { - self.with_eth(|handlers| handlers.clone()) - } - - /// Returns the configured [`EthApi`] or creates it if it does not exist yet - /// - /// Caution: This will spawn the necessary tasks required by the [`EthApi`]: [`EthStateCache`]. - /// - /// # Panics - /// - /// If called outside of the tokio runtime. - pub fn eth_api(&mut self) -> EthApi { - self.with_eth(|handlers| handlers.api.clone()) - } - - /// Instantiates `TraceApi` - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn trace_api(&mut self) -> TraceApi> { - let eth = self.eth_handlers(); - TraceApi::new(self.provider.clone(), eth.api, self.blocking_pool_guard.clone()) - } - - /// Instantiates [`EthBundle`] Api - /// - /// # Panics - /// - /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] - pub fn bundle_api(&mut self) -> EthBundle> { - let eth_api = self.eth_api(); - EthBundle::new(eth_api, self.blocking_pool_guard.clone()) - } - - /// Instantiates `OtterscanApi` - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn otterscan_api(&mut self) -> OtterscanApi> { - let eth_api = self.eth_api(); - OtterscanApi::new(eth_api) - } - - /// Instantiates `DebugApi` - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn debug_api(&mut self) -> DebugApi> { - let eth_api = self.eth_api(); - DebugApi::new(self.provider.clone(), eth_api, self.blocking_pool_guard.clone()) - } - - /// Instantiates `NetApi` - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn net_api(&mut self) -> NetApi> { - let eth_api = self.eth_api(); - NetApi::new(self.network.clone(), eth_api) - } - - /// Instantiates `RethApi` - pub fn reth_api(&self) -> RethApi { - RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) - } } /// A builder type for configuring and launching the servers that will handle RPC requests. @@ -1306,7 +1255,7 @@ impl RpcServerConfig { /// Returns true if any server is configured. /// - /// If no server is configured, no server will be be launched on [`RpcServerConfig::start`]. + /// If no server is configured, no server will be launched on [`RpcServerConfig::start`]. 
pub const fn has_server(&self) -> bool { self.http_server_config.is_some() || self.ws_server_config.is_some() || @@ -1328,28 +1277,26 @@ impl RpcServerConfig { self.ipc_endpoint.clone() } - /// Convenience function to do [`RpcServerConfig::build`] and [`RpcServer::start`] in one step - pub async fn start(self, modules: TransportRpcModules) -> Result { - self.build(&modules).await?.start(modules).await - } - /// Creates the [`CorsLayer`] if any fn maybe_cors_layer(cors: Option) -> Result, CorsDomainError> { cors.as_deref().map(cors::create_cors_layer).transpose() } /// Creates the [`AuthLayer`] if any - fn maybe_jwt_layer(&self) -> Option> { - self.jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) + fn maybe_jwt_layer(jwt_secret: Option) -> Option> { + jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) } - /// Builds the ws and http server(s). + /// Builds and starts the configured server(s): http, ws, ipc. /// - /// If both are on the same port, they are combined into one server. - async fn build_ws_http( - &mut self, - modules: &TransportRpcModules, - ) -> Result { + /// If both http and ws are on the same port, they are combined into one server. + /// + /// Returns the [`RpcServerHandle`] with the handle to the started servers. 
+ pub async fn start(self, modules: &TransportRpcModules) -> Result { + let mut http_handle = None; + let mut ws_handle = None; + let mut ipc_handle = None; + let http_socket_addr = self.http_addr.unwrap_or(SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::LOCALHOST, constants::DEFAULT_HTTP_RPC_PORT, @@ -1360,6 +1307,17 @@ impl RpcServerConfig { constants::DEFAULT_WS_RPC_PORT, ))); + let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); + let ipc_path = + self.ipc_endpoint.clone().unwrap_or_else(|| constants::DEFAULT_IPC_ENDPOINT.into()); + + if let Some(builder) = self.ipc_server_config { + let ipc = builder + .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) + .build(ipc_path); + ipc_handle = Some(ipc.start(modules.ipc.clone().expect("ipc server error")).await?); + } + // If both are configured on the same port, we combine them into one server. if self.http_addr == self.ws_addr && self.http_server_config.is_some() && @@ -1381,53 +1339,62 @@ impl RpcServerConfig { .cloned(); // we merge this into one server using the http setup - self.ws_server_config.take(); - modules.config.ensure_ws_http_identical()?; - let builder = self.http_server_config.take().expect("http_server_config is Some"); - let server = builder - .set_http_middleware( - tower::ServiceBuilder::new() - .option_layer(Self::maybe_cors_layer(cors)?) 
- .option_layer(self.maybe_jwt_layer()), - ) - .set_rpc_middleware( - RpcServiceBuilder::new().layer( - modules - .http - .as_ref() - .or(modules.ws.as_ref()) - .map(RpcRequestMetrics::same_port) - .unwrap_or_default(), - ), - ) - .build(http_socket_addr) - .await - .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; - let addr = server - .local_addr() - .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; - return Ok(WsHttpServer { - http_local_addr: Some(addr), - ws_local_addr: Some(addr), - server: WsHttpServers::SamePort(server), - jwt_secret: self.jwt_secret, - }); + if let Some(builder) = self.http_server_config { + let server = builder + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(cors)?) + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + ) + .set_rpc_middleware( + RpcServiceBuilder::new().layer( + modules + .http + .as_ref() + .or(modules.ws.as_ref()) + .map(RpcRequestMetrics::same_port) + .unwrap_or_default(), + ), + ) + .build(http_socket_addr) + .await + .map_err(|err| { + RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)) + })?; + let addr = server.local_addr().map_err(|err| { + RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)) + })?; + if let Some(module) = modules.http.as_ref().or(modules.ws.as_ref()) { + let handle = server.start(module.clone()); + http_handle = Some(handle.clone()); + ws_handle = Some(handle); + } + return Ok(RpcServerHandle { + http_local_addr: Some(addr), + ws_local_addr: Some(addr), + http: http_handle, + ws: ws_handle, + ipc_endpoint: self.ipc_endpoint.clone(), + ipc: ipc_handle, + jwt_secret: self.jwt_secret, + }); + } } + let mut ws_local_addr = None; + let mut ws_server = None; let mut http_local_addr = None; let mut http_server = None; - let mut ws_local_addr = None; - let mut ws_server = None; - if let Some(builder) = self.ws_server_config.take() { + if let Some(builder) = 
self.ws_server_config { let server = builder .ws_only() .set_http_middleware( tower::ServiceBuilder::new() .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) - .option_layer(self.maybe_jwt_layer()), + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( RpcServiceBuilder::new() @@ -1436,6 +1403,7 @@ impl RpcServerConfig { .build(ws_socket_addr) .await .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; + let addr = server .local_addr() .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; @@ -1444,13 +1412,13 @@ impl RpcServerConfig { ws_server = Some(server); } - if let Some(builder) = self.http_server_config.take() { + if let Some(builder) = self.http_server_config { let server = builder .http_only() .set_http_middleware( tower::ServiceBuilder::new() .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) - .option_layer(self.maybe_jwt_layer()), + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( RpcServiceBuilder::new().layer( @@ -1467,36 +1435,20 @@ impl RpcServerConfig { http_server = Some(server); } - Ok(WsHttpServer { + http_handle = http_server + .map(|http_server| http_server.start(modules.http.clone().expect("http server error"))); + ws_handle = ws_server + .map(|ws_server| ws_server.start(modules.ws.clone().expect("ws server error"))); + Ok(RpcServerHandle { http_local_addr, ws_local_addr, - server: WsHttpServers::DifferentPort { http: http_server, ws: ws_server }, + http: http_handle, + ws: ws_handle, + ipc_endpoint: self.ipc_endpoint.clone(), + ipc: ipc_handle, jwt_secret: self.jwt_secret, }) } - - /// Finalize the configuration of the server(s). - /// - /// This consumes the builder and returns a server. 
- /// - /// Note: The server is not started and does nothing unless polled, See also - /// [`RpcServer::start`] - pub async fn build(mut self, modules: &TransportRpcModules) -> Result { - let mut server = RpcServer::empty(); - server.ws_http = self.build_ws_http(modules).await?; - - if let Some(builder) = self.ipc_server_config { - let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); - let ipc_path = - self.ipc_endpoint.unwrap_or_else(|| constants::DEFAULT_IPC_ENDPOINT.into()); - let ipc = builder - .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) - .build(ipc_path); - server.ipc = Some(ipc); - } - - Ok(server) - } } /// Holds modules to be installed per transport type @@ -1703,167 +1655,6 @@ impl TransportRpcModules { self.merge_ipc(other)?; Ok(()) } - - /// Convenience function for starting a server - pub async fn start_server(self, builder: RpcServerConfig) -> Result { - builder.start(self).await - } -} - -/// Container type for ws and http servers in all possible combinations. -#[derive(Default)] -struct WsHttpServer { - /// The address of the http server - http_local_addr: Option, - /// The address of the ws server - ws_local_addr: Option, - /// Configured ws,http servers - server: WsHttpServers, - /// The jwt secret. - jwt_secret: Option, -} - -// Define the type alias with detailed type complexity -type WsHttpServerKind = Server< - Stack< - tower::util::Either, Identity>, - Stack, Identity>, - >, - Stack, ->; - -/// Enum for holding the http and ws servers in all possible combinations. 
-enum WsHttpServers { - /// Both servers are on the same port - SamePort(WsHttpServerKind), - /// Servers are on different ports - DifferentPort { http: Option, ws: Option }, -} - -// === impl WsHttpServers === - -impl WsHttpServers { - /// Starts the servers and returns the handles (http, ws) - async fn start( - self, - http_module: Option>, - ws_module: Option>, - config: &TransportRpcModuleConfig, - ) -> Result<(Option, Option), RpcError> { - let mut http_handle = None; - let mut ws_handle = None; - match self { - Self::SamePort(server) => { - // Make sure http and ws modules are identical, since we currently can't run - // different modules on same server - config.ensure_ws_http_identical()?; - - if let Some(module) = http_module.or(ws_module) { - let handle = server.start(module); - http_handle = Some(handle.clone()); - ws_handle = Some(handle); - } - } - Self::DifferentPort { http, ws } => { - if let Some((server, module)) = - http.and_then(|server| http_module.map(|module| (server, module))) - { - http_handle = Some(server.start(module)); - } - if let Some((server, module)) = - ws.and_then(|server| ws_module.map(|module| (server, module))) - { - ws_handle = Some(server.start(module)); - } - } - } - - Ok((http_handle, ws_handle)) - } -} - -impl Default for WsHttpServers { - fn default() -> Self { - Self::DifferentPort { http: None, ws: None } - } -} - -/// Container type for each transport ie. http, ws, and ipc server -pub struct RpcServer { - /// Configured ws,http servers - ws_http: WsHttpServer, - /// ipc server - ipc: Option>>, -} - -// === impl RpcServer === - -impl RpcServer { - fn empty() -> Self { - Self { ws_http: Default::default(), ipc: None } - } - - /// Returns the [`SocketAddr`] of the http server if started. 
- pub const fn http_local_addr(&self) -> Option { - self.ws_http.http_local_addr - } - /// Return the `JwtSecret` of the server - pub const fn jwt(&self) -> Option { - self.ws_http.jwt_secret - } - - /// Returns the [`SocketAddr`] of the ws server if started. - pub const fn ws_local_addr(&self) -> Option { - self.ws_http.ws_local_addr - } - - /// Returns the endpoint of the ipc server if started. - pub fn ipc_endpoint(&self) -> Option { - self.ipc.as_ref().map(|ipc| ipc.endpoint()) - } - - /// Starts the configured server by spawning the servers on the tokio runtime. - /// - /// This returns an [RpcServerHandle] that's connected to the server task(s) until the server is - /// stopped or the [RpcServerHandle] is dropped. - #[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint()), target = "rpc", level = "TRACE")] - pub async fn start(self, modules: TransportRpcModules) -> Result { - trace!(target: "rpc", "staring RPC server"); - let Self { ws_http, ipc: ipc_server } = self; - let TransportRpcModules { config, http, ws, ipc } = modules; - let mut handle = RpcServerHandle { - http_local_addr: ws_http.http_local_addr, - ws_local_addr: ws_http.ws_local_addr, - http: None, - ws: None, - ipc_endpoint: None, - ipc: None, - jwt_secret: None, - }; - - let (http, ws) = ws_http.server.start(http, ws, &config).await?; - handle.http = http; - handle.ws = ws; - - if let Some((server, module)) = - ipc_server.and_then(|server| ipc.map(|module| (server, module))) - { - handle.ipc_endpoint = Some(server.endpoint()); - handle.ipc = Some(server.start(module).await?); - } - - Ok(handle) - } -} - -impl fmt::Debug for RpcServer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RpcServer") - .field("http", &self.ws_http.http_local_addr.is_some()) - .field("ws", &self.ws_http.ws_local_addr.is_some()) - .field("ipc", &self.ipc.is_some()) - .finish() - } } /// A handle to the spawned 
servers. @@ -1879,7 +1670,7 @@ pub struct RpcServerHandle { http: Option, ws: Option, ipc_endpoint: Option, - ipc: Option, + ipc: Option, jwt_secret: Option, } diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 17bd638d0747..14143d229cca 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -11,9 +11,9 @@ use jsonrpsee::{ rpc_params, types::error::ErrorCode, }; +use reth_network_peers::NodeRecord; use reth_primitives::{ - hex_literal::hex, Address, BlockId, BlockNumberOrTag, Bytes, NodeRecord, TxHash, B256, B64, - U256, U64, + hex_literal::hex, Address, BlockId, BlockNumberOrTag, Bytes, TxHash, B256, B64, U256, U64, }; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, @@ -159,6 +159,17 @@ where let transaction_request = TransactionRequest::default(); let bytes = Bytes::default(); let tx = Bytes::from(hex!("02f871018303579880850555633d1b82520894eee27662c2b8eba3cd936a23f039f3189633e4c887ad591c62bdaeb180c080a07ea72c68abfb8fca1bd964f0f99132ed9280261bdca3e549546c0205e800f7d0a05b4ef3039e9c9b9babc179a1878fb825b5aaf5aed2fa8744854150157b08d6f3")); + let typed_data = serde_json::from_str( + r#"{ + "types": { + "EIP712Domain": [] + }, + "primaryType": "EIP712Domain", + "domain": {}, + "message": {} + }"#, + ) + .unwrap(); // Implemented EthApiClient::protocol_version(client).await.unwrap(); @@ -180,9 +191,7 @@ where EthApiClient::uncle_by_block_hash_and_index(client, hash, index).await.unwrap(); EthApiClient::uncle_by_block_number_and_index(client, block_number, index).await.unwrap(); EthApiClient::sign(client, address, bytes.clone()).await.unwrap_err(); - EthApiClient::sign_typed_data(client, address, jsonrpsee::core::JsonValue::Null) - .await - .unwrap_err(); + EthApiClient::sign_typed_data(client, address, typed_data).await.unwrap_err(); EthApiClient::transaction_by_hash(client, tx_hash).await.unwrap(); EthApiClient::transaction_by_block_hash_and_index(client, hash, 
index).await.unwrap(); EthApiClient::transaction_by_block_number_and_index(client, block_number, index).await.unwrap(); @@ -225,7 +234,7 @@ where let block_id = BlockId::Number(BlockNumberOrTag::default()); DebugApiClient::raw_header(client, block_id).await.unwrap(); - DebugApiClient::raw_block(client, block_id).await.unwrap(); + DebugApiClient::raw_block(client, block_id).await.unwrap_err(); DebugApiClient::raw_transaction(client, B256::default()).await.unwrap(); DebugApiClient::raw_receipts(client, block_id).await.unwrap(); assert!(is_unimplemented(DebugApiClient::bad_blocks(client).await.err().unwrap())); @@ -286,12 +295,14 @@ where let address = Address::default(); let sender = Address::default(); let tx_hash = TxHash::default(); - let block_number = BlockNumberOrTag::default(); + let block_number = 1; let page_number = 1; let page_size = 10; let nonce = 1; let block_hash = B256::default(); + OtterscanClient::get_header_by_number(client, block_number).await.unwrap(); + OtterscanClient::has_code(client, address, None).await.unwrap(); OtterscanClient::get_api_level(client).await.unwrap(); @@ -300,9 +311,7 @@ where OtterscanClient::get_transaction_error(client, tx_hash).await.unwrap(); - assert!(is_unimplemented( - OtterscanClient::trace_transaction(client, tx_hash).await.err().unwrap() - )); + OtterscanClient::trace_transaction(client, tx_hash).await.unwrap(); OtterscanClient::get_block_details(client, block_number).await.unwrap(); @@ -325,15 +334,11 @@ where .err() .unwrap() )); - assert!(is_unimplemented( - OtterscanClient::get_transaction_by_sender_and_nonce(client, sender, nonce,) - .await - .err() - .unwrap() - )); - assert!(is_unimplemented( - OtterscanClient::get_contract_creator(client, address).await.err().unwrap() - )); + assert!(OtterscanClient::get_transaction_by_sender_and_nonce(client, sender, nonce) + .await + .err() + .is_none()); + assert!(OtterscanClient::get_contract_creator(client, address).await.unwrap().is_none()); } #[tokio::test(flavor = 
"multi_thread")] @@ -543,7 +548,7 @@ async fn test_eth_logs_args() { let client = handle.http_client().unwrap(); let mut params = ArrayParams::default(); - params.insert( serde_json::json!({"blockHash":"0x58dc57ab582b282c143424bd01e8d923cddfdcda9455bad02a29522f6274a948"})).unwrap(); + params.insert(serde_json::json!({"blockHash":"0x58dc57ab582b282c143424bd01e8d923cddfdcda9455bad02a29522f6274a948"})).unwrap(); let resp = client.request::, _>("eth_getLogs", params).await; // block does not exist diff --git a/crates/rpc/rpc-builder/tests/it/startup.rs b/crates/rpc/rpc-builder/tests/it/startup.rs index 91800166f1d9..5680d03a5307 100644 --- a/crates/rpc/rpc-builder/tests/it/startup.rs +++ b/crates/rpc/rpc-builder/tests/it/startup.rs @@ -1,14 +1,16 @@ //! Startup tests -use crate::utils::{ - launch_http, launch_http_ws_same_port, launch_ws, test_address, test_rpc_builder, -}; +use std::io; + use reth_rpc_builder::{ error::{RpcError, ServerKind, WsHttpSamePortError}, - RpcServerConfig, TransportRpcModuleConfig, + EthApiBuild, RpcServerConfig, TransportRpcModuleConfig, }; use reth_rpc_server_types::RethRpcModule; -use std::io; + +use crate::utils::{ + launch_http, launch_http_ws_same_port, launch_ws, test_address, test_rpc_builder, +}; fn is_addr_in_use_kind(err: &RpcError, kind: ServerKind) -> bool { match err { @@ -24,10 +26,10 @@ async fn test_http_addr_in_use() { let handle = launch_http(vec![RethRpcModule::Admin]).await; let addr = handle.http_local_addr().unwrap(); let builder = test_rpc_builder(); - let server = builder.build(TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin])); - let result = server - .start_server(RpcServerConfig::http(Default::default()).with_http_address(addr)) - .await; + let server = builder + .build(TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), EthApiBuild::build); + let result = + RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await; let err = result.unwrap_err(); 
assert!(is_addr_in_use_kind(&err, ServerKind::Http(addr)), "{err}"); } @@ -37,9 +39,9 @@ async fn test_ws_addr_in_use() { let handle = launch_ws(vec![RethRpcModule::Admin]).await; let addr = handle.ws_local_addr().unwrap(); let builder = test_rpc_builder(); - let server = builder.build(TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin])); - let result = - server.start_server(RpcServerConfig::ws(Default::default()).with_ws_address(addr)).await; + let server = builder + .build(TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), EthApiBuild::build); + let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await; let err = result.unwrap_err(); assert!(is_addr_in_use_kind(&err, ServerKind::WS(addr)), "{err}"); } @@ -58,15 +60,14 @@ async fn test_launch_same_port_different_modules() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]) .with_http(vec![RethRpcModule::Eth]), + EthApiBuild::build, ); let addr = test_address(); - let res = server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(addr) - .with_http(Default::default()) - .with_http_address(addr), - ) + let res = RpcServerConfig::ws(Default::default()) + .with_ws_address(addr) + .with_http(Default::default()) + .with_http_address(addr) + .start(&server) .await; let err = res.unwrap_err(); assert!(matches!( @@ -81,17 +82,16 @@ async fn test_launch_same_port_same_cors() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), + EthApiBuild::build, ); let addr = test_address(); - let res = server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(addr) - .with_http(Default::default()) - .with_cors(Some("*".to_string())) - .with_http_cors(Some("*".to_string())) - .with_http_address(addr), - ) + let res = RpcServerConfig::ws(Default::default()) + .with_ws_address(addr) + .with_http(Default::default()) + 
.with_cors(Some("*".to_string())) + .with_http_cors(Some("*".to_string())) + .with_http_address(addr) + .start(&server) .await; assert!(res.is_ok()); } @@ -102,17 +102,16 @@ async fn test_launch_same_port_different_cors() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), + EthApiBuild::build, ); let addr = test_address(); - let res = server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(addr) - .with_http(Default::default()) - .with_cors(Some("*".to_string())) - .with_http_cors(Some("example".to_string())) - .with_http_address(addr), - ) + let res = RpcServerConfig::ws(Default::default()) + .with_ws_address(addr) + .with_http(Default::default()) + .with_cors(Some("*".to_string())) + .with_http_cors(Some("example".to_string())) + .with_http_address(addr) + .start(&server) .await; let err = res.unwrap_err(); assert!(matches!( diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 9d660ae3035d..ea9954f23c10 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,13 +1,15 @@ +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; + use reth_beacon_consensus::BeaconConsensusEngineHandle; +use reth_chainspec::MAINNET; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::MAINNET; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerConfig, AuthServerHandle}, - RpcModuleBuilder, RpcServerConfig, RpcServerHandle, TransportRpcModuleConfig, + EthApiBuild, RpcModuleBuilder, RpcServerConfig, RpcServerHandle, TransportRpcModuleConfig, }; use reth_rpc_engine_api::EngineApi; use reth_rpc_layer::JwtSecret; @@ -15,7 +17,6 @@ use 
reth_rpc_server_types::RpcModuleSelection; use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_tasks::TokioTaskExecutor; use reth_transaction_pool::test_utils::{TestPool, TestPoolBuilder}; -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use tokio::sync::mpsc::unbounded_channel; /// Localhost with port 0 so a free port is used. @@ -51,9 +52,10 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { /// Launches a new server with http only with the given modules pub async fn launch_http(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = builder.build(TransportRpcModuleConfig::set_http(modules)); - server - .start_server(RpcServerConfig::http(Default::default()).with_http_address(test_address())) + let server = builder.build(TransportRpcModuleConfig::set_http(modules), EthApiBuild::build); + RpcServerConfig::http(Default::default()) + .with_http_address(test_address()) + .start(&server) .await .unwrap() } @@ -61,9 +63,10 @@ pub async fn launch_http(modules: impl Into) -> RpcServerHan /// Launches a new server with ws only with the given modules pub async fn launch_ws(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = builder.build(TransportRpcModuleConfig::set_ws(modules)); - server - .start_server(RpcServerConfig::ws(Default::default()).with_ws_address(test_address())) + let server = builder.build(TransportRpcModuleConfig::set_ws(modules), EthApiBuild::build); + RpcServerConfig::ws(Default::default()) + .with_http_address(test_address()) + .start(&server) .await .unwrap() } @@ -72,15 +75,15 @@ pub async fn launch_ws(modules: impl Into) -> RpcServerHandl pub async fn launch_http_ws(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); let modules = modules.into(); - let server = - builder.build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules)); - server - .start_server( - RpcServerConfig::ws(Default::default()) - 
.with_ws_address(test_address()) - .with_http(Default::default()) - .with_http_address(test_address()), - ) + let server = builder.build( + TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), + EthApiBuild::build, + ); + RpcServerConfig::ws(Default::default()) + .with_ws_address(test_address()) + .with_http(Default::default()) + .with_http_address(test_address()) + .start(&server) .await .unwrap() } @@ -89,16 +92,16 @@ pub async fn launch_http_ws(modules: impl Into) -> RpcServer pub async fn launch_http_ws_same_port(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); let modules = modules.into(); - let server = - builder.build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules)); + let server = builder.build( + TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), + EthApiBuild::build, + ); let addr = test_address(); - server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(addr) - .with_http(Default::default()) - .with_http_address(addr), - ) + RpcServerConfig::ws(Default::default()) + .with_ws_address(addr) + .with_http(Default::default()) + .with_http_address(addr) + .start(&server) .await .unwrap() } diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 80c69d8a4dc4..d067515f6c2a 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-rpc-api.workspace = true reth-rpc-types.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index ae46a3f3d2fb..55534083a582 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -2,6 +2,7 @@ use crate::{metrics::EngineApiMetrics, EngineApiError, EngineApiResult}; use async_trait::async_trait; use 
jsonrpsee_core::RpcResult; use reth_beacon_consensus::BeaconConsensusEngineHandle; +use reth_chainspec::ChainSpec; use reth_engine_primitives::EngineTypes; use reth_evm::provider::EvmEnvProvider; use reth_payload_builder::PayloadStore; @@ -9,16 +10,18 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hardfork, B256, U64}; +use reth_primitives::{ + Block, BlockHash, BlockHashOrNumber, BlockNumber, EthereumHardfork, B256, U64, +}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, - ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, - CAPABILITIES, + ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, + ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + TransitionConfiguration, CAPABILITIES, }; use reth_rpc_types_compat::engine::payload::{ - convert_payload_input_v2_to_payload, convert_to_payload_body_v1, + convert_payload_input_v2_to_payload, convert_to_payload_body_v1, convert_to_payload_body_v2, }; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -82,7 +85,7 @@ where } /// Fetches the client version. - async fn get_client_version_v1( + fn get_client_version_v1( &self, _client: ClientVersionV1, ) -> EngineApiResult> { @@ -358,21 +361,18 @@ where }) } - /// Returns the execution payload bodies by the range starting at `start`, containing `count` - /// blocks. 
- /// - /// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus - /// layer p2p specification, meaning the input should be treated as untrusted or potentially - /// adversarial. - /// - /// Implementers should take care when acting on the input to this method, specifically - /// ensuring that the range is limited properly, and that the range boundaries are computed - /// correctly and without panics. - pub async fn get_payload_bodies_by_range( + /// Fetches all the blocks for the provided range starting at `start`, containing `count` + /// blocks and returns the mapped payload bodies. + async fn get_payload_bodies_by_range_with( &self, start: BlockNumber, count: u64, - ) -> EngineApiResult { + f: F, + ) -> EngineApiResult>> + where + F: Fn(Block) -> R + Send + 'static, + R: Send + 'static, + { let (tx, rx) = oneshot::channel(); let inner = self.inner.clone(); @@ -404,7 +404,7 @@ where let block_result = inner.provider.block(BlockHashOrNumber::Number(num)); match block_result { Ok(block) => { - result.push(block.map(convert_to_payload_body_v1)); + result.push(block.map(&f)); } Err(err) => { tx.send(Err(EngineApiError::Internal(Box::new(err)))).ok(); @@ -418,11 +418,45 @@ where rx.await.map_err(|err| EngineApiError::Internal(Box::new(err)))? } + /// Returns the execution payload bodies by the range starting at `start`, containing `count` + /// blocks. + /// + /// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus + /// layer p2p specification, meaning the input should be treated as untrusted or potentially + /// adversarial. + /// + /// Implementers should take care when acting on the input to this method, specifically + /// ensuring that the range is limited properly, and that the range boundaries are computed + /// correctly and without panics. 
+ pub async fn get_payload_bodies_by_range_v1( + &self, + start: BlockNumber, + count: u64, + ) -> EngineApiResult { + self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v1).await + } + + /// Returns the execution payload bodies by the range starting at `start`, containing `count` + /// blocks. + /// + /// Same as [`Self::get_payload_bodies_by_range_v1`] but as [`ExecutionPayloadBodiesV2`]. + pub async fn get_payload_bodies_by_range_v2( + &self, + start: BlockNumber, + count: u64, + ) -> EngineApiResult { + self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v2).await + } + /// Called to retrieve execution payload bodies by hashes. - pub fn get_payload_bodies_by_hash( + fn get_payload_bodies_by_hash_with( &self, hashes: Vec, - ) -> EngineApiResult { + f: F, + ) -> EngineApiResult>> + where + F: Fn(Block) -> R, + { let len = hashes.len() as u64; if len > MAX_PAYLOAD_BODIES_LIMIT { return Err(EngineApiError::PayloadRequestTooLarge { len }); @@ -435,15 +469,33 @@ where .provider .block(BlockHashOrNumber::Hash(hash)) .map_err(|err| EngineApiError::Internal(Box::new(err)))?; - result.push(block.map(convert_to_payload_body_v1)); + result.push(block.map(&f)); } Ok(result) } + /// Called to retrieve execution payload bodies by hashes. + pub fn get_payload_bodies_by_hash_v1( + &self, + hashes: Vec, + ) -> EngineApiResult { + self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v1) + } + + /// Called to retrieve execution payload bodies by hashes. + /// + /// Same as [`Self::get_payload_bodies_by_hash_v1`] but as [`ExecutionPayloadBodiesV2`]. + pub fn get_payload_bodies_by_hash_v2( + &self, + hashes: Vec, + ) -> EngineApiResult { + self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v2) + } + /// Called to verify network configuration parameters and ensure that Consensus and Execution /// layers are using the latest configuration. 
- pub async fn exchange_transition_configuration( + pub fn exchange_transition_configuration( &self, config: TransitionConfiguration, ) -> EngineApiResult { @@ -456,7 +508,7 @@ where let merge_terminal_td = self .inner .chain_spec - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .ttd() .expect("the engine API should not be running for chains w/o paris"); @@ -468,7 +520,7 @@ where }); } - self.inner.beacon_consensus.transition_configuration_exchanged().await; + self.inner.beacon_consensus.transition_configuration_exchanged(); // Short circuit if communicated block hash is zero if terminal_block_hash.is_zero() { @@ -759,11 +811,22 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV1"); let start = Instant::now(); - let res = Self::get_payload_bodies_by_hash(self, block_hashes); + let res = Self::get_payload_bodies_by_hash_v1(self, block_hashes); self.inner.metrics.latency.get_payload_bodies_by_hash_v1.record(start.elapsed()); Ok(res?) } + async fn get_payload_bodies_by_hash_v2( + &self, + block_hashes: Vec, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV2"); + let start = Instant::now(); + let res = Self::get_payload_bodies_by_hash_v2(self, block_hashes); + self.inner.metrics.latency.get_payload_bodies_by_hash_v2.record(start.elapsed()); + Ok(res?) + } + /// Handler for `engine_getPayloadBodiesByRangeV1` /// /// See also @@ -787,11 +850,23 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV1"); let start_time = Instant::now(); - let res = Self::get_payload_bodies_by_range(self, start.to(), count.to()).await; + let res = Self::get_payload_bodies_by_range_v1(self, start.to(), count.to()).await; self.inner.metrics.latency.get_payload_bodies_by_range_v1.record(start_time.elapsed()); Ok(res?) 
} + async fn get_payload_bodies_by_range_v2( + &self, + start: U64, + count: U64, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV2"); + let start_time = Instant::now(); + let res = Self::get_payload_bodies_by_range_v2(self, start.to(), count.to()).await; + self.inner.metrics.latency.get_payload_bodies_by_range_v2.record(start_time.elapsed()); + Ok(res?) + } + /// Handler for `engine_exchangeTransitionConfigurationV1` /// See also async fn exchange_transition_configuration( @@ -800,7 +875,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_exchangeTransitionConfigurationV1"); let start = Instant::now(); - let res = Self::exchange_transition_configuration(self, config).await; + let res = Self::exchange_transition_configuration(self, config); self.inner.metrics.latency.exchange_transition_configuration.record(start.elapsed()); Ok(res?) } @@ -813,7 +888,7 @@ where client: ClientVersionV1, ) -> RpcResult> { trace!(target: "rpc::engine", "Serving engine_getClientVersionV1"); - let res = Self::get_client_version_v1(self, client).await; + let res = Self::get_client_version_v1(self, client); Ok(res?) 
} @@ -842,8 +917,9 @@ mod tests { use reth_ethereum_engine_primitives::EthEngineTypes; use reth_testing_utils::generators::random_block; + use reth_chainspec::MAINNET; use reth_payload_builder::test_utils::spawn_test_payload_service; - use reth_primitives::{SealedBlock, B256, MAINNET}; + use reth_primitives::{SealedBlock, B256}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; @@ -887,7 +963,7 @@ mod tests { commit: "defa64b2".to_string(), }; let (_, api) = setup_engine_api(); - let res = api.get_client_version_v1(client.clone()).await; + let res = api.get_client_version_v1(client.clone()); assert_eq!(res.unwrap(), vec![client]); } @@ -927,7 +1003,7 @@ mod tests { // test [EngineApiMessage::GetPayloadBodiesByRange] for (start, count) in by_range_tests { - let res = api.get_payload_bodies_by_range(start, count).await; + let res = api.get_payload_bodies_by_range_v1(start, count).await; assert_matches!(res, Err(EngineApiError::InvalidBodiesRange { .. })); } } @@ -937,7 +1013,7 @@ mod tests { let (_, api) = setup_engine_api(); let request_count = MAX_PAYLOAD_BODIES_LIMIT + 1; - let res = api.get_payload_bodies_by_range(0, request_count).await; + let res = api.get_payload_bodies_by_range_v1(0, request_count).await; assert_matches!(res, Err(EngineApiError::PayloadRequestTooLarge { .. 
})); } @@ -957,7 +1033,7 @@ mod tests { .map(|b| Some(convert_to_payload_body_v1(b.unseal()))) .collect::>(); - let res = api.get_payload_bodies_by_range(start, count).await.unwrap(); + let res = api.get_payload_bodies_by_range_v1(start, count).await.unwrap(); assert_eq!(res, expected); } @@ -998,7 +1074,7 @@ mod tests { }) .collect::>(); - let res = api.get_payload_bodies_by_range(start, count).await.unwrap(); + let res = api.get_payload_bodies_by_range_v1(start, count).await.unwrap(); assert_eq!(res, expected); let expected = blocks @@ -1018,7 +1094,7 @@ mod tests { .collect::>(); let hashes = blocks.iter().map(|b| b.hash()).collect(); - let res = api.get_payload_bodies_by_hash(hashes).unwrap(); + let res = api.get_payload_bodies_by_hash_v1(hashes).unwrap(); assert_eq!(res, expected); } } @@ -1034,17 +1110,21 @@ mod tests { let (handle, api) = setup_engine_api(); let transition_config = TransitionConfiguration { - terminal_total_difficulty: handle.chain_spec.fork(Hardfork::Paris).ttd().unwrap() + + terminal_total_difficulty: handle + .chain_spec + .fork(EthereumHardfork::Paris) + .ttd() + .unwrap() + U256::from(1), ..Default::default() }; - let res = api.exchange_transition_configuration(transition_config).await; + let res = api.exchange_transition_configuration(transition_config); assert_matches!( res, Err(EngineApiError::TerminalTD { execution, consensus }) - if execution == handle.chain_spec.fork(Hardfork::Paris).ttd().unwrap() && consensus == U256::from(transition_config.terminal_total_difficulty) + if execution == handle.chain_spec.fork(EthereumHardfork::Paris).ttd().unwrap() && consensus == U256::from(transition_config.terminal_total_difficulty) ); } @@ -1061,13 +1141,17 @@ mod tests { random_block(&mut rng, terminal_block_number, None, None, None); let transition_config = TransitionConfiguration { - terminal_total_difficulty: handle.chain_spec.fork(Hardfork::Paris).ttd().unwrap(), + terminal_total_difficulty: handle + .chain_spec + 
.fork(EthereumHardfork::Paris) + .ttd() + .unwrap(), terminal_block_hash: consensus_terminal_block.hash(), terminal_block_number: U64::from(terminal_block_number), }; // Unknown block number - let res = api.exchange_transition_configuration(transition_config).await; + let res = api.exchange_transition_configuration(transition_config); assert_matches!( res, @@ -1081,7 +1165,7 @@ mod tests { execution_terminal_block.clone().unseal(), ); - let res = api.exchange_transition_configuration(transition_config).await; + let res = api.exchange_transition_configuration(transition_config); assert_matches!( res, @@ -1099,14 +1183,18 @@ mod tests { random_block(&mut generators::rng(), terminal_block_number, None, None, None); let transition_config = TransitionConfiguration { - terminal_total_difficulty: handle.chain_spec.fork(Hardfork::Paris).ttd().unwrap(), + terminal_total_difficulty: handle + .chain_spec + .fork(EthereumHardfork::Paris) + .ttd() + .unwrap(), terminal_block_hash: terminal_block.hash(), terminal_block_number: U64::from(terminal_block_number), }; handle.provider.add_block(terminal_block.hash(), terminal_block.unseal()); - let config = api.exchange_transition_configuration(transition_config).await.unwrap(); + let config = api.exchange_transition_configuration(transition_config).unwrap(); assert_eq!(config, transition_config); } } diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 73489b7557b6..0ae97768b6c0 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -44,8 +44,12 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) get_payload_v4: Histogram, /// Latency for `engine_getPayloadBodiesByRangeV1` pub(crate) get_payload_bodies_by_range_v1: Histogram, + /// Latency for `engine_getPayloadBodiesByRangeV2` + pub(crate) get_payload_bodies_by_range_v2: Histogram, /// Latency for `engine_getPayloadBodiesByHashV1` pub(crate) get_payload_bodies_by_hash_v1: 
Histogram, + /// Latency for `engine_getPayloadBodiesByHashV2` + pub(crate) get_payload_bodies_by_hash_v2: Histogram, /// Latency for `engine_exchangeTransitionConfigurationV1` pub(crate) exchange_transition_configuration: Histogram, } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 94f011c02db3..0f2853f1f022 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -3,8 +3,7 @@ use alloy_rlp::{Decodable, Error as RlpError}; use assert_matches::assert_matches; use reth_primitives::{ - bytes::{Bytes, BytesMut}, - proofs, Block, SealedBlock, TransactionSigned, Withdrawals, B256, U256, + proofs, Block, Bytes, SealedBlock, TransactionSigned, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, @@ -59,20 +58,19 @@ fn payload_validation() { // Valid extra data let block_with_valid_extra_data = transform_block(block.clone(), |mut b| { - b.header.extra_data = BytesMut::zeroed(32).freeze().into(); + b.header.extra_data = Bytes::from_static(&[0; 32]); b }); assert_matches!(try_into_sealed_block(block_with_valid_extra_data, None), Ok(_)); // Invalid extra data - let block_with_invalid_extra_data: Bytes = BytesMut::zeroed(33).freeze(); + let block_with_invalid_extra_data = Bytes::from_static(&[0; 33]); let invalid_extra_data_block = transform_block(block.clone(), |mut b| { - b.header.extra_data = block_with_invalid_extra_data.clone().into(); + b.header.extra_data = block_with_invalid_extra_data.clone(); b }); assert_matches!( - try_into_sealed_block(invalid_extra_data_block,None), Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data ); @@ -92,7 +90,7 @@ fn payload_validation() { let mut payload_with_invalid_txs: ExecutionPayloadV1 = block_to_payload_v1(block.clone()); payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| { - *tx = 
Bytes::new().into(); + *tx = Bytes::new(); }); let payload_with_invalid_txs = try_payload_v1_to_block(payload_with_invalid_txs); assert_matches!(payload_with_invalid_txs, Err(PayloadError::Decode(RlpError::InputTooShort))); diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml new file mode 100644 index 000000000000..fb8d84b3f4b7 --- /dev/null +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "reth-rpc-eth-api" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Reth RPC 'eth' namespace API" + +[lints] +workspace = true + +[dependencies] +# reth +revm.workspace = true +revm-inspectors = { workspace = true, features = ["js-tracer"] } +revm-primitives = { workspace = true, features = ["dev"] } +reth-errors.workspace = true +reth-evm.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-rpc-types.workspace = true +reth-rpc-types-compat.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } +reth-transaction-pool.workspace = true +reth-chainspec.workspace = true +reth-execution-types.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true + +# ethereum +alloy-dyn-abi = { workspace = true, features = ["eip712"] } + +# rpc +jsonrpsee = { workspace = true, features = ["server", "macros"] } + +# async +async-trait.workspace = true +futures.workspace = true +parking_lot.workspace = true +tokio.workspace = true + +# misc +auto_impl.workspace = true +dyn-clone.workspace = true +tracing.workspace = true + +[features] +client = ["jsonrpsee/client", "jsonrpsee/async-client"] +optimism = [ + "reth-primitives/optimism", + "revm/optimism", + "reth-provider/optimism", + "reth-rpc-eth-types/optimism" +] \ No newline at end of file diff --git a/crates/rpc/rpc-api/src/bundle.rs 
b/crates/rpc/rpc-eth-api/src/bundle.rs similarity index 98% rename from crates/rpc/rpc-api/src/bundle.rs rename to crates/rpc/rpc-eth-api/src/bundle.rs index 429f6948f8ab..bf3a623df2f1 100644 --- a/crates/rpc/rpc-api/src/bundle.rs +++ b/crates/rpc/rpc-eth-api/src/bundle.rs @@ -1,10 +1,10 @@ -//! Additional `eth_` functions for bundles +//! Additional `eth_` RPC API for bundles. //! //! See also use jsonrpsee::proc_macros::rpc; use reth_primitives::{Bytes, B256}; -use reth_rpc_types::{ +use reth_rpc_types::mev::{ CancelBundleRequest, CancelPrivateTransactionRequest, EthBundleHash, EthCallBundle, EthCallBundleResponse, EthSendBundle, PrivateTransactionRequest, }; diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs new file mode 100644 index 000000000000..3ba0a59e1000 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -0,0 +1,726 @@ +//! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for +//! the `eth_` namespace. + +use alloy_dyn_abi::TypedData; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_rpc_types::{ + serde_helpers::JsonStorageKey, + state::{EvmOverrides, StateOverride}, + AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, + EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, + StateContext, SyncStatus, Transaction, TransactionRequest, Work, +}; +use tracing::trace; + +use crate::helpers::{ + transaction::UpdateRawTxForwarder, EthApiSpec, EthBlocks, EthCall, EthFees, EthState, + EthTransactions, FullEthApi, +}; + +/// Helper trait, unifies functionality that must be supported to implement all RPC methods for +/// server. 
+pub trait FullEthApiServer: EthApiServer + FullEthApi + UpdateRawTxForwarder + Clone {} + +impl FullEthApiServer for T where T: EthApiServer + FullEthApi + UpdateRawTxForwarder + Clone {} + +/// Eth rpc interface: +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] +pub trait EthApi { + /// Returns the protocol version encoded as a string. + #[method(name = "protocolVersion")] + async fn protocol_version(&self) -> RpcResult; + + /// Returns an object with data about the sync status or false. + #[method(name = "syncing")] + fn syncing(&self) -> RpcResult; + + /// Returns the client coinbase address. + #[method(name = "coinbase")] + async fn author(&self) -> RpcResult
; + + /// Returns a list of addresses owned by client. + #[method(name = "accounts")] + fn accounts(&self) -> RpcResult>; + + /// Returns the number of most recent block. + #[method(name = "blockNumber")] + fn block_number(&self) -> RpcResult; + + /// Returns the chain ID of the current network. + #[method(name = "chainId")] + async fn chain_id(&self) -> RpcResult>; + + /// Returns information about a block by hash. + #[method(name = "getBlockByHash")] + async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult>; + + /// Returns information about a block by number. + #[method(name = "getBlockByNumber")] + async fn block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> RpcResult>; + + /// Returns the number of transactions in a block from a block matching the given block hash. + #[method(name = "getBlockTransactionCountByHash")] + async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult>; + + /// Returns the number of transactions in a block matching the given block number. + #[method(name = "getBlockTransactionCountByNumber")] + async fn block_transaction_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult>; + + /// Returns the number of uncles in a block from a block matching the given block hash. + #[method(name = "getUncleCountByBlockHash")] + async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult>; + + /// Returns the number of uncles in a block with given block number. + #[method(name = "getUncleCountByBlockNumber")] + async fn block_uncles_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult>; + + /// Returns all transaction receipts for a given block. + #[method(name = "getBlockReceipts")] + async fn block_receipts( + &self, + block_id: BlockId, + ) -> RpcResult>>; + + /// Returns an uncle block of the given block and index. 
+ #[method(name = "getUncleByBlockHashAndIndex")] + async fn uncle_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult>; + + /// Returns an uncle block of the given block and index. + #[method(name = "getUncleByBlockNumberAndIndex")] + async fn uncle_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>; + + /// Returns the EIP-2718 encoded transaction if it exists. + /// + /// If this is a EIP-4844 transaction that is in the pool it will include the sidecar. + #[method(name = "getRawTransactionByHash")] + async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult>; + + /// Returns the information about a transaction requested by transaction hash. + #[method(name = "getTransactionByHash")] + async fn transaction_by_hash(&self, hash: B256) -> RpcResult>; + + /// Returns information about a raw transaction by block hash and transaction index position. + #[method(name = "getRawTransactionByBlockHashAndIndex")] + async fn raw_transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult>; + + /// Returns information about a transaction by block hash and transaction index position. + #[method(name = "getTransactionByBlockHashAndIndex")] + async fn transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult>; + + /// Returns information about a raw transaction by block number and transaction index + /// position. + #[method(name = "getRawTransactionByBlockNumberAndIndex")] + async fn raw_transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>; + + /// Returns information about a transaction by block number and transaction index position. 
+ #[method(name = "getTransactionByBlockNumberAndIndex")] + async fn transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>; + + /// Returns the receipt of a transaction by transaction hash. + #[method(name = "getTransactionReceipt")] + async fn transaction_receipt(&self, hash: B256) -> RpcResult>; + + /// Returns the balance of the account of given address. + #[method(name = "getBalance")] + async fn balance(&self, address: Address, block_number: Option) -> RpcResult; + + /// Returns the value from a storage position at a given address + #[method(name = "getStorageAt")] + async fn storage_at( + &self, + address: Address, + index: JsonStorageKey, + block_number: Option, + ) -> RpcResult; + + /// Returns the number of transactions sent from an address at given block number. + #[method(name = "getTransactionCount")] + async fn transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult; + + /// Returns code at a given address at given block number. + #[method(name = "getCode")] + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult; + + /// Returns the block's header at given number. + #[method(name = "getHeaderByNumber")] + async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult>; + + /// Returns the block's header at given hash. + #[method(name = "getHeaderByHash")] + async fn header_by_hash(&self, hash: B256) -> RpcResult>; + + /// Executes a new message call immediately without creating a transaction on the block chain. 
+ #[method(name = "call")] + async fn call( + &self, + request: TransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult; + + /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the + /// optionality of state overrides + #[method(name = "callMany")] + async fn call_many( + &self, + bundle: Bundle, + state_context: Option, + state_override: Option, + ) -> RpcResult>; + + /// Generates an access list for a transaction. + /// + /// This method creates an [EIP2930](https://eips.ethereum.org/EIPS/eip-2930) type accessList based on a given Transaction. + /// + /// An access list contains all storage slots and addresses touched by the transaction, except + /// for the sender account and the chain's precompiles. + /// + /// It returns list of addresses and storage keys used by the transaction, plus the gas + /// consumed when the access list is added. That is, it gives you the list of addresses and + /// storage keys that will be used by that transaction, plus the gas consumed if the access + /// list is included. Like eth_estimateGas, this is an estimation; the list could change + /// when the transaction is actually mined. Adding an accessList to your transaction does + /// not necessary result in lower gas usage compared to a transaction without an access + /// list. + #[method(name = "createAccessList")] + async fn create_access_list( + &self, + request: TransactionRequest, + block_number: Option, + ) -> RpcResult; + + /// Generates and returns an estimate of how much gas is necessary to allow the transaction to + /// complete. + #[method(name = "estimateGas")] + async fn estimate_gas( + &self, + request: TransactionRequest, + block_number: Option, + state_override: Option, + ) -> RpcResult; + + /// Returns the current price per gas in wei. 
+ #[method(name = "gasPrice")] + async fn gas_price(&self) -> RpcResult; + + /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions. + #[method(name = "maxPriorityFeePerGas")] + async fn max_priority_fee_per_gas(&self) -> RpcResult; + + /// Introduced in EIP-4844, returns the current blob base fee in wei. + #[method(name = "blobBaseFee")] + async fn blob_base_fee(&self) -> RpcResult; + + /// Returns the Transaction fee history + /// + /// Introduced in EIP-1559 for getting information on the appropriate priority fee to use. + /// + /// Returns transaction base fee per gas and effective priority fee per gas for the + /// requested/supported block range. The returned Fee history for the returned block range + /// can be a subsection of the requested range if not all blocks are available. + #[method(name = "feeHistory")] + async fn fee_history( + &self, + block_count: U64, + newest_block: BlockNumberOrTag, + reward_percentiles: Option>, + ) -> RpcResult; + + /// Returns whether the client is actively mining new blocks. + #[method(name = "mining")] + async fn is_mining(&self) -> RpcResult; + + /// Returns the number of hashes per second that the node is mining with. + #[method(name = "hashrate")] + async fn hashrate(&self) -> RpcResult; + + /// Returns the hash of the current block, the seedHash, and the boundary condition to be met + /// (“target”) + #[method(name = "getWork")] + async fn get_work(&self) -> RpcResult; + + /// Used for submitting mining hashrate. + /// + /// Can be used for remote miners to submit their hash rate. + /// It accepts the miner hash rate and an identifier which must be unique between nodes. + /// Returns `true` if the block was successfully submitted, `false` otherwise. + #[method(name = "submitHashrate")] + async fn submit_hashrate(&self, hashrate: U256, id: B256) -> RpcResult; + + /// Used for submitting a proof-of-work solution. 
+ #[method(name = "submitWork")] + async fn submit_work(&self, nonce: B64, pow_hash: B256, mix_digest: B256) -> RpcResult; + + /// Sends transaction; will block waiting for signer to return the + /// transaction hash. + #[method(name = "sendTransaction")] + async fn send_transaction(&self, request: TransactionRequest) -> RpcResult; + + /// Sends signed transaction, returning its hash. + #[method(name = "sendRawTransaction")] + async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult; + + /// Returns an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n" + /// + len(message) + message))). + #[method(name = "sign")] + async fn sign(&self, address: Address, message: Bytes) -> RpcResult; + + /// Signs a transaction that can be submitted to the network at a later time using with + /// `sendRawTransaction.` + #[method(name = "signTransaction")] + async fn sign_transaction(&self, transaction: TransactionRequest) -> RpcResult; + + /// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md). + #[method(name = "signTypedData")] + async fn sign_typed_data(&self, address: Address, data: TypedData) -> RpcResult; + + /// Returns the account and storage values of the specified account including the Merkle-proof. + /// This call can be used to verify that the data you are pulling from is not tampered with. 
+ #[method(name = "getProof")] + async fn get_proof( + &self, + address: Address, + keys: Vec, + block_number: Option, + ) -> RpcResult; +} + +#[async_trait::async_trait] +impl EthApiServer for T +where + Self: FullEthApi, +{ + /// Handler for: `eth_protocolVersion` + async fn protocol_version(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_protocolVersion"); + EthApiSpec::protocol_version(self).await.to_rpc_result() + } + + /// Handler for: `eth_syncing` + fn syncing(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_syncing"); + EthApiSpec::sync_status(self).to_rpc_result() + } + + /// Handler for: `eth_coinbase` + async fn author(&self) -> RpcResult
{ + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_accounts` + fn accounts(&self) -> RpcResult> { + trace!(target: "rpc::eth", "Serving eth_accounts"); + Ok(EthApiSpec::accounts(self)) + } + + /// Handler for: `eth_blockNumber` + fn block_number(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_blockNumber"); + Ok(U256::from( + EthApiSpec::chain_info(self).with_message("failed to read chain info")?.best_number, + )) + } + + /// Handler for: `eth_chainId` + async fn chain_id(&self) -> RpcResult> { + trace!(target: "rpc::eth", "Serving eth_chainId"); + Ok(Some(EthApiSpec::chain_id(self))) + } + + /// Handler for: `eth_getBlockByHash` + async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?full, "Serving eth_getBlockByHash"); + Ok(EthBlocks::rpc_block(self, hash.into(), full).await?) + } + + /// Handler for: `eth_getBlockByNumber` + async fn block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber"); + Ok(EthBlocks::rpc_block(self, number.into(), full).await?) 
+ } + + /// Handler for: `eth_getBlockTransactionCountByHash` + async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash"); + Ok(EthBlocks::block_transaction_count(self, hash.into()).await?.map(U256::from)) + } + + /// Handler for: `eth_getBlockTransactionCountByNumber` + async fn block_transaction_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber"); + Ok(EthBlocks::block_transaction_count(self, number.into()).await?.map(U256::from)) + } + + /// Handler for: `eth_getUncleCountByBlockHash` + async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getUncleCountByBlockHash"); + Ok(EthBlocks::ommers(self, hash.into())?.map(|ommers| U256::from(ommers.len()))) + } + + /// Handler for: `eth_getUncleCountByBlockNumber` + async fn block_uncles_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, "Serving eth_getUncleCountByBlockNumber"); + Ok(EthBlocks::ommers(self, number.into())?.map(|ommers| U256::from(ommers.len()))) + } + + /// Handler for: `eth_getBlockReceipts` + async fn block_receipts( + &self, + block_id: BlockId, + ) -> RpcResult>> { + trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); + Ok(EthBlocks::block_receipts(self, block_id).await?) + } + + /// Handler for: `eth_getUncleByBlockHashAndIndex` + async fn uncle_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getUncleByBlockHashAndIndex"); + Ok(EthBlocks::ommer_by_block_and_index(self, hash.into(), index).await?) 
+ } + + /// Handler for: `eth_getUncleByBlockNumberAndIndex` + async fn uncle_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getUncleByBlockNumberAndIndex"); + Ok(EthBlocks::ommer_by_block_and_index(self, number.into(), index).await?) + } + + /// Handler for: `eth_getRawTransactionByHash` + async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getRawTransactionByHash"); + Ok(EthTransactions::raw_transaction_by_hash(self, hash).await?) + } + + /// Handler for: `eth_getTransactionByHash` + async fn transaction_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); + Ok(EthTransactions::transaction_by_hash(self, hash).await?.map(Into::into)) + } + + /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` + async fn raw_transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); + Ok(EthTransactions::raw_transaction_by_block_and_tx_index(self, hash.into(), index.into()) + .await?) + } + + /// Handler for: `eth_getTransactionByBlockHashAndIndex` + async fn transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getTransactionByBlockHashAndIndex"); + Ok(EthTransactions::transaction_by_block_and_tx_index(self, hash.into(), index.into()) + .await?) 
+ } + + /// Handler for: `eth_getRawTransactionByBlockNumberAndIndex` + async fn raw_transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getRawTransactionByBlockNumberAndIndex"); + Ok(EthTransactions::raw_transaction_by_block_and_tx_index( + self, + number.into(), + index.into(), + ) + .await?) + } + + /// Handler for: `eth_getTransactionByBlockNumberAndIndex` + async fn transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getTransactionByBlockNumberAndIndex"); + Ok(EthTransactions::transaction_by_block_and_tx_index(self, number.into(), index.into()) + .await?) + } + + /// Handler for: `eth_getTransactionReceipt` + async fn transaction_receipt(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt"); + Ok(EthTransactions::transaction_receipt(self, hash).await?) + } + + /// Handler for: `eth_getBalance` + async fn balance(&self, address: Address, block_number: Option) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getBalance"); + Ok(EthState::balance(self, address, block_number).await?) + } + + /// Handler for: `eth_getStorageAt` + async fn storage_at( + &self, + address: Address, + index: JsonStorageKey, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getStorageAt"); + let res: B256 = EthState::storage_at(self, address, index, block_number).await?; + Ok(res) + } + + /// Handler for: `eth_getTransactionCount` + async fn transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getTransactionCount"); + Ok(EthState::transaction_count(self, address, block_number).await?) 
+ } + + /// Handler for: `eth_getCode` + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getCode"); + Ok(EthState::get_code(self, address, block_number).await?) + } + + /// Handler for: `eth_getHeaderByNumber` + async fn header_by_number(&self, block_number: BlockNumberOrTag) -> RpcResult> { + trace!(target: "rpc::eth", ?block_number, "Serving eth_getHeaderByNumber"); + Ok(EthBlocks::rpc_block_header(self, block_number.into()).await?) + } + + /// Handler for: `eth_getHeaderByHash` + async fn header_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getHeaderByHash"); + Ok(EthBlocks::rpc_block_header(self, hash.into()).await?) + } + + /// Handler for: `eth_call` + async fn call( + &self, + request: TransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult { + trace!(target: "rpc::eth", ?request, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); + Ok(EthCall::call( + self, + request, + block_number, + EvmOverrides::new(state_overrides, block_overrides), + ) + .await?) + } + + /// Handler for: `eth_callMany` + async fn call_many( + &self, + bundle: Bundle, + state_context: Option, + state_override: Option, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?bundle, ?state_context, ?state_override, "Serving eth_callMany"); + Ok(EthCall::call_many(self, bundle, state_context, state_override).await?) 
+ } + + /// Handler for: `eth_createAccessList` + async fn create_access_list( + &self, + request: TransactionRequest, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_createAccessList"); + let access_list_with_gas_used = + EthCall::create_access_list_at(self, request, block_number).await?; + + Ok(access_list_with_gas_used) + } + + /// Handler for: `eth_estimateGas` + async fn estimate_gas( + &self, + request: TransactionRequest, + block_number: Option, + state_override: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); + Ok(EthCall::estimate_gas_at( + self, + request, + block_number.unwrap_or_default(), + state_override, + ) + .await?) + } + + /// Handler for: `eth_gasPrice` + async fn gas_price(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_gasPrice"); + return Ok(EthFees::gas_price(self).await?) + } + + /// Handler for: `eth_maxPriorityFeePerGas` + async fn max_priority_fee_per_gas(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_maxPriorityFeePerGas"); + return Ok(EthFees::suggested_priority_fee(self).await?) + } + + /// Handler for: `eth_blobBaseFee` + async fn blob_base_fee(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_blobBaseFee"); + return Ok(EthFees::blob_base_fee(self).await?) + } + + // FeeHistory is calculated based on lazy evaluation of fees for historical blocks, and further + // caching of it in the LRU cache. + // When new RPC call is executed, the cache gets locked, we check it for the historical fees + // according to the requested block range, and fill any cache misses (in both RPC response + // and cache itself) with the actual data queried from the database. + // To minimize the number of database seeks required to query the missing data, we calculate the + // first non-cached block number and last non-cached block number. 
After that, we query this + // range of consecutive blocks from the database. + /// Handler for: `eth_feeHistory` + async fn fee_history( + &self, + block_count: U64, + newest_block: BlockNumberOrTag, + reward_percentiles: Option>, + ) -> RpcResult { + trace!(target: "rpc::eth", ?block_count, ?newest_block, ?reward_percentiles, "Serving eth_feeHistory"); + return Ok( + EthFees::fee_history(self, block_count.to(), newest_block, reward_percentiles).await? + ) + } + + /// Handler for: `eth_mining` + async fn is_mining(&self) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_hashrate` + async fn hashrate(&self) -> RpcResult { + Ok(U256::ZERO) + } + + /// Handler for: `eth_getWork` + async fn get_work(&self) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_submitHashrate` + async fn submit_hashrate(&self, _hashrate: U256, _id: B256) -> RpcResult { + Ok(false) + } + + /// Handler for: `eth_submitWork` + async fn submit_work( + &self, + _nonce: B64, + _pow_hash: B256, + _mix_digest: B256, + ) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_sendTransaction` + async fn send_transaction(&self, request: TransactionRequest) -> RpcResult { + trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction"); + Ok(EthTransactions::send_transaction(self, request).await?) + } + + /// Handler for: `eth_sendRawTransaction` + async fn send_raw_transaction(&self, tx: Bytes) -> RpcResult { + trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransaction"); + Ok(EthTransactions::send_raw_transaction(self, tx).await?) + } + + /// Handler for: `eth_sign` + async fn sign(&self, address: Address, message: Bytes) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?message, "Serving eth_sign"); + Ok(EthTransactions::sign(self, address, message).await?) 
+ } + + /// Handler for: `eth_signTransaction` + async fn sign_transaction(&self, _transaction: TransactionRequest) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_signTypedData` + async fn sign_typed_data(&self, address: Address, data: TypedData) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?data, "Serving eth_signTypedData"); + Ok(EthTransactions::sign_typed_data(self, &data, address)?) + } + + /// Handler for: `eth_getProof` + async fn get_proof( + &self, + address: Address, + keys: Vec, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?keys, ?block_number, "Serving eth_getProof"); + Ok(EthState::get_proof(self, address, keys, block_number)?.await?) + } +} diff --git a/crates/rpc/rpc-api/src/eth_filter.rs b/crates/rpc/rpc-eth-api/src/filter.rs similarity index 97% rename from crates/rpc/rpc-api/src/eth_filter.rs rename to crates/rpc/rpc-eth-api/src/filter.rs index 2e395d5bad76..da53b577eec5 100644 --- a/crates/rpc/rpc-api/src/eth_filter.rs +++ b/crates/rpc/rpc-eth-api/src/filter.rs @@ -1,5 +1,8 @@ +//! `eth_` RPC API for filtering. + use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_rpc_types::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind}; + /// Rpc Interface for poll-based ethereum filter API. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs new file mode 100644 index 000000000000..78f1ef9da66b --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -0,0 +1,234 @@ +//! Database access for `eth_` block RPC methods. Loads block and receipt data w.r.t. network. 
+ +use std::sync::Arc; + +use futures::Future; +use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta}; +use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; +use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; +use reth_rpc_types::{AnyTransactionReceipt, Header, Index, RichBlock}; +use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; + +use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; + +/// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the +/// `eth_` namespace. +pub trait EthBlocks: LoadBlock { + /// Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider(&self) -> impl HeaderProvider; + + /// Returns the block header for the given block id. + fn rpc_block_header( + &self, + block_id: BlockId, + ) -> impl Future>> + Send + where + Self: LoadPendingBlock + SpawnBlocking, + { + async move { Ok(self.rpc_block(block_id, false).await?.map(|block| block.inner.header)) } + } + + /// Returns the populated rpc block object for the given block id. + /// + /// If `full` is true, the block object will contain all transaction objects, otherwise it will + /// only contain the transaction hashes. + fn rpc_block( + &self, + block_id: BlockId, + full: bool, + ) -> impl Future>> + Send + where + Self: LoadPendingBlock + SpawnBlocking, + { + async move { + let block = match self.block_with_senders(block_id).await? { + Some(block) => block, + None => return Ok(None), + }; + let block_hash = block.hash(); + let total_difficulty = EthBlocks::provider(self) + .header_td_by_number(block.number)? + .ok_or(EthApiError::UnknownBlockNumber)?; + let block = + from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash))?; + Ok(Some(block.into())) + } + } + + /// Returns the number transactions in the given block. 
+ /// + /// Returns `None` if the block does not exist + fn block_transaction_count( + &self, + block_id: BlockId, + ) -> impl Future>> + Send { + async move { + if block_id.is_pending() { + // Pending block can be fetched directly without need for caching + return Ok(LoadBlock::provider(self).pending_block()?.map(|block| block.body.len())) + } + + let block_hash = match LoadBlock::provider(self).block_hash_for_id(block_id)? { + Some(block_hash) => block_hash, + None => return Ok(None), + }; + + Ok(self.cache().get_block_transactions(block_hash).await?.map(|txs| txs.len())) + } + } + + /// Helper function for `eth_getBlockReceipts`. + /// + /// Returns all transaction receipts in block, or `None` if block wasn't found. + fn block_receipts( + &self, + block_id: BlockId, + ) -> impl Future>>> + Send + where + Self: LoadReceipt, + { + async move { + if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { + let block_number = block.number; + let base_fee = block.base_fee_per_gas; + let block_hash = block.hash(); + let excess_blob_gas = block.excess_blob_gas; + let timestamp = block.timestamp; + let block = block.unseal(); + + let receipts = block + .body + .into_iter() + .zip(receipts.iter()) + .enumerate() + .map(|(idx, (tx, receipt))| { + let meta = TransactionMeta { + tx_hash: tx.hash, + index: idx as u64, + block_hash, + block_number, + base_fee, + excess_blob_gas, + timestamp, + }; + + ReceiptBuilder::new(&tx, meta, receipt, &receipts) + .map(|builder| builder.build()) + }) + .collect::>>(); + return receipts.map(Some) + } + + Ok(None) + } + } + + /// Helper method that loads a bock and all its receipts. + fn load_block_and_receipts( + &self, + block_id: BlockId, + ) -> impl Future>)>>> + Send + where + Self: LoadReceipt, + { + async move { + if block_id.is_pending() { + return Ok(LoadBlock::provider(self) + .pending_block_and_receipts()? 
+ .map(|(sb, receipts)| (sb, Arc::new(receipts)))) + } + + if let Some(block_hash) = LoadBlock::provider(self).block_hash_for_id(block_id)? { + return Ok(LoadReceipt::cache(self).get_block_and_receipts(block_hash).await?) + } + + Ok(None) + } + } + + /// Returns uncle headers of given block. + /// + /// Returns an empty vec if there are none. + fn ommers(&self, block_id: BlockId) -> EthResult>> { + Ok(LoadBlock::provider(self).ommers_by_id(block_id)?) + } + + /// Returns uncle block at given index in given block. + /// + /// Returns `None` if index out of range. + fn ommer_by_block_and_index( + &self, + block_id: BlockId, + index: Index, + ) -> impl Future>> + Send { + async move { + let uncles = if block_id.is_pending() { + // Pending block can be fetched directly without need for caching + LoadBlock::provider(self).pending_block()?.map(|block| block.ommers) + } else { + LoadBlock::provider(self).ommers_by_id(block_id)? + } + .unwrap_or_default(); + + let index = usize::from(index); + let uncle = + uncles.into_iter().nth(index).map(|header| uncle_block_from_header(header).into()); + Ok(uncle) + } + } +} + +/// Loads a block from database. +/// +/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. +pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { + // Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider(&self) -> impl BlockReaderIdExt; + + /// Returns a handle for reading data from memory. + /// + /// Data access in default (L1) trait method implementations. + fn cache(&self) -> &EthStateCache; + + /// Returns the block object for the given block id. + fn block( + &self, + block_id: BlockId, + ) -> impl Future>> + Send { + async move { + self.block_with_senders(block_id) + .await + .map(|maybe_block| maybe_block.map(|block| block.block)) + } + } + + /// Returns the block object for the given block id. 
+ fn block_with_senders( + &self, + block_id: BlockId, + ) -> impl Future>> + Send { + async move { + if block_id.is_pending() { + // Pending block can be fetched directly without need for caching + let maybe_pending = + LoadPendingBlock::provider(self).pending_block_with_senders()?; + return if maybe_pending.is_some() { + Ok(maybe_pending) + } else { + self.local_pending_block().await + } + } + + let block_hash = match LoadPendingBlock::provider(self).block_hash_for_id(block_id)? { + Some(block_hash) => block_hash, + None => return Ok(None), + }; + + Ok(self.cache().get_sealed_block_with_senders(block_hash).await?) + } + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs new file mode 100644 index 000000000000..4a2c81b0fdfe --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs @@ -0,0 +1,65 @@ +//! Spawns a blocking task. CPU heavy tasks are executed with the `rayon` library. IO heavy tasks +//! are executed on the `tokio` runtime. + +use futures::Future; +use reth_rpc_eth_types::{EthApiError, EthResult}; +use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; +use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit}; + +/// Executes code on a blocking thread. +pub trait SpawnBlocking: Clone + Send + Sync + 'static { + /// Returns a handle for spawning IO heavy blocking tasks. + /// + /// Runtime access in default trait method implementations. + fn io_task_spawner(&self) -> impl TaskSpawner; + + /// Returns a handle for spawning CPU heavy blocking tasks. + /// + /// Thread pool access in default trait method implementations. + fn tracing_task_pool(&self) -> &BlockingTaskPool; + + /// See also [`Semaphore::acquire_owned`](`tokio::sync::Semaphore::acquire_owned`). + fn acquire_owned( + &self, + ) -> impl Future> + Send; + + /// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`). 
+ fn acquire_many_owned( + &self, + n: u32, + ) -> impl Future> + Send; + + /// Executes the future on a new blocking task. + /// + /// Note: This is expected for futures that are dominated by blocking IO operations, for tracing + /// or CPU bound operations in general use [`spawn_tracing`](Self::spawn_tracing). + fn spawn_blocking_io(&self, f: F) -> impl Future> + Send + where + F: FnOnce(Self) -> EthResult + Send + 'static, + R: Send + 'static, + { + let (tx, rx) = oneshot::channel(); + let this = self.clone(); + self.io_task_spawner().spawn_blocking(Box::pin(async move { + let res = async move { f(this) }.await; + let _ = tx.send(res); + })); + + async move { rx.await.map_err(|_| EthApiError::InternalEthError)? } + } + + /// Executes a blocking task on the tracing pool. + /// + /// Note: This is expected for futures that are predominantly CPU bound, as it uses `rayon` + /// under the hood, for blocking IO futures use [`spawn_blocking`](Self::spawn_blocking_io). See + /// . + fn spawn_tracing(&self, f: F) -> impl Future> + Send + where + F: FnOnce(Self) -> EthResult + Send + 'static, + R: Send + 'static, + { + let this = self.clone(); + let fut = self.tracing_task_pool().spawn(move || f(this)); + async move { fut.await.map_err(|_| EthApiError::InternalBlockingTaskError)? } + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs new file mode 100644 index 000000000000..0379292894cf --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -0,0 +1,779 @@ +//! Loads a pending block from database. Helper trait for `eth_` transaction, call and trace RPC +//! methods. 
+ +use futures::Future; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_primitives::{ + revm_primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason, + ResultAndState, TransactTo, + }, + Bytes, TransactionSignedEcRecovered, TxKind, B256, U256, +}; +use reth_provider::StateProvider; +use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; +use reth_rpc_eth_types::{ + cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, + error::ensure_success, + revm_utils::{ + apply_state_overrides, build_call_evm_env, caller_gas_allowance, + cap_tx_gas_limit_with_caller_allowance, get_precompiles, prepare_call_env, + }, + EthApiError, EthResult, RevertError, RpcInvalidTransactionError, StateCacheDb, +}; +use reth_rpc_server_types::constants::gas_oracle::{ESTIMATE_GAS_ERROR_RATIO, MIN_TRANSACTION_GAS}; +use reth_rpc_types::{ + state::{EvmOverrides, StateOverride}, + AccessListWithGasUsed, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, + TransactionRequest, +}; +use revm::{Database, DatabaseCommit}; +use revm_inspectors::access_list::AccessListInspector; +use tracing::trace; + +use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; + +/// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in +/// the `eth_` namespace. +pub trait EthCall: Call + LoadPendingBlock { + /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
+ fn estimate_gas_at( + &self, + request: TransactionRequest, + at: BlockId, + state_override: Option, + ) -> impl Future> + Send { + Call::estimate_gas_at(self, request, at, state_override) + } + + /// Executes the call request (`eth_call`) and returns the output + fn call( + &self, + request: TransactionRequest, + block_number: Option, + overrides: EvmOverrides, + ) -> impl Future> + Send { + async move { + let (res, _env) = + self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; + + ensure_success(res.result) + } + } + + /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the + /// optionality of state overrides + fn call_many( + &self, + bundle: Bundle, + state_context: Option, + mut state_override: Option, + ) -> impl Future>> + Send + where + Self: LoadBlock, + { + async move { + let Bundle { transactions, block_override } = bundle; + if transactions.is_empty() { + return Err(EthApiError::InvalidParams(String::from("transactions are empty."))) + } + + let StateContext { transaction_index, block_number } = + state_context.unwrap_or_default(); + let transaction_index = transaction_index.unwrap_or_default(); + + let target_block = block_number.unwrap_or_default(); + let is_block_target_pending = target_block.is_pending(); + + let ((cfg, block_env, _), block) = futures::try_join!( + self.evm_env_at(target_block), + self.block_with_senders(target_block) + )?; + + let Some(block) = block else { return Err(EthApiError::UnknownBlockNumber) }; + let gas_limit = self.call_gas_limit(); + + // we're essentially replaying the transactions in the block here, hence we need the + // state that points to the beginning of the block, which is the state at + // the parent block + let mut at = block.parent_hash; + let mut replay_block_txs = true; + + let num_txs = transaction_index.index().unwrap_or(block.body.len()); + // but if all transactions are to be replayed, we can use the state at the block itself, + // 
however only if we're not targeting the pending block, because for pending we can't + // rely on the block's state being available + if !is_block_target_pending && num_txs == block.body.len() { + at = block.hash(); + replay_block_txs = false; + } + + let this = self.clone(); + self.spawn_with_state_at_block(at.into(), move |state| { + let mut results = Vec::with_capacity(transactions.len()); + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + if replay_block_txs { + // only need to replay the transactions in the block if not all transactions are + // to be replayed + let transactions = block.into_transactions_ecrecovered().take(num_txs); + for tx in transactions { + let env = EnvWithHandlerCfg::new_with_cfg_env( + cfg.clone(), + block_env.clone(), + Call::evm_config(&this).tx_env(&tx), + ); + let (res, _) = this.transact(&mut db, env)?; + db.commit(res.state); + } + } + + let block_overrides = block_override.map(Box::new); + + let mut transactions = transactions.into_iter().peekable(); + while let Some(tx) = transactions.next() { + // apply state overrides only once, before the first transaction + let state_overrides = state_override.take(); + let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); + + let env = prepare_call_env( + cfg.clone(), + block_env.clone(), + tx, + gas_limit, + &mut db, + overrides, + )?; + let (res, _) = this.transact(&mut db, env)?; + + match ensure_success(res.result) { + Ok(output) => { + results.push(EthCallResponse { value: Some(output), error: None }); + } + Err(err) => { + results.push(EthCallResponse { + value: None, + error: Some(err.to_string()), + }); + } + } + + if transactions.peek().is_some() { + // need to apply the state changes of this call before executing the next + // call + db.commit(res.state); + } + } + + Ok(results) + }) + .await + } + } + + /// Creates [`AccessListWithGasUsed`] for the [`TransactionRequest`] at the given + /// [`BlockId`], or latest block. 
+ fn create_access_list_at( + &self, + request: TransactionRequest, + block_number: Option, + ) -> impl Future> + Send + where + Self: Trace, + { + async move { + let block_id = block_number.unwrap_or_default(); + let (cfg, block, at) = self.evm_env_at(block_id).await?; + + self.spawn_blocking_io(move |this| { + this.create_access_list_with(cfg, block, at, request) + }) + .await + } + } + + /// Creates [`AccessListWithGasUsed`] for the [`TransactionRequest`] at the given + /// [`BlockId`]. + fn create_access_list_with( + &self, + cfg: CfgEnvWithHandlerCfg, + block: BlockEnv, + at: BlockId, + mut request: TransactionRequest, + ) -> EthResult + where + Self: Trace, + { + let state = self.state_at_block_id(at)?; + + let mut env = build_call_evm_env(cfg, block, request.clone())?; + + // we want to disable this in eth_createAccessList, since this is common practice used by + // other node impls and providers + env.cfg.disable_block_gas_limit = true; + + // The basefee should be ignored for eth_createAccessList + // See: + // + env.cfg.disable_base_fee = true; + + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + if request.gas.is_none() && env.tx.gas_price > U256::ZERO { + // no gas limit was provided in the request, so we need to cap the request's gas limit + cap_tx_gas_limit_with_caller_allowance(&mut db, &mut env.tx)?; + } + + let from = request.from.unwrap_or_default(); + let to = if let Some(TxKind::Call(to)) = request.to { + to + } else { + let nonce = db.basic_ref(from)?.unwrap_or_default().nonce; + from.create(nonce) + }; + + // can consume the list since we're not using the request anymore + let initial = request.access_list.take().unwrap_or_default(); + + let precompiles = get_precompiles(env.handler_cfg.spec_id); + let mut inspector = AccessListInspector::new(initial, from, to, precompiles); + let (result, env) = self.inspect(&mut db, env, &mut inspector)?; + + match result.result { + ExecutionResult::Halt { reason, .. 
} => Err(match reason { + HaltReason::NonceOverflow => RpcInvalidTransactionError::NonceMaxValue, + halt => RpcInvalidTransactionError::EvmHalt(halt), + }), + ExecutionResult::Revert { output, .. } => { + Err(RpcInvalidTransactionError::Revert(RevertError::new(output))) + } + ExecutionResult::Success { .. } => Ok(()), + }?; + + let access_list = inspector.into_access_list(); + + let cfg_with_spec_id = + CfgEnvWithHandlerCfg { cfg_env: env.cfg.clone(), handler_cfg: env.handler_cfg }; + + // calculate the gas used using the access list + request.access_list = Some(access_list.clone()); + let gas_used = + self.estimate_gas_with(cfg_with_spec_id, env.block.clone(), request, &*db.db, None)?; + + Ok(AccessListWithGasUsed { access_list, gas_used }) + } +} + +/// Executes code on state. +pub trait Call: LoadState + SpawnBlocking { + /// Returns default gas limit to use for `eth_call` and tracing RPC methods. + /// + /// Data access in default trait method implementations. + fn call_gas_limit(&self) -> u64; + + /// Returns a handle for reading evm config. + /// + /// Data access in default (L1) trait method implementations. + fn evm_config(&self) -> &impl ConfigureEvm; + + /// Executes the closure with the state that corresponds to the given [`BlockId`]. + fn with_state_at_block(&self, at: BlockId, f: F) -> EthResult + where + F: FnOnce(StateProviderTraitObjWrapper<'_>) -> EthResult, + { + let state = self.state_at_block_id(at)?; + f(StateProviderTraitObjWrapper(&state)) + } + + /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state + /// changes. + fn transact( + &self, + db: DB, + env: EnvWithHandlerCfg, + ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> + where + DB: Database, + ::Error: Into, + { + let mut evm = self.evm_config().evm_with_env(db, env); + let res = evm.transact()?; + let (_, env) = evm.into_db_and_env_with_handler_cfg(); + Ok((res, env)) + } + + /// Executes the call request at the given [`BlockId`]. 
+ fn transact_call_at( + &self, + request: TransactionRequest, + at: BlockId, + overrides: EvmOverrides, + ) -> impl Future> + Send + where + Self: LoadPendingBlock, + { + let this = self.clone(); + self.spawn_with_call_at(request, at, overrides, move |db, env| this.transact(db, env)) + } + + /// Executes the closure with the state that corresponds to the given [`BlockId`] on a new task + fn spawn_with_state_at_block( + &self, + at: BlockId, + f: F, + ) -> impl Future> + Send + where + F: FnOnce(StateProviderTraitObjWrapper<'_>) -> EthResult + Send + 'static, + T: Send + 'static, + { + self.spawn_tracing(move |this| { + let state = this.state_at_block_id(at)?; + f(StateProviderTraitObjWrapper(&state)) + }) + } + + /// Prepares the state and env for the given [`TransactionRequest`] at the given [`BlockId`] and + /// executes the closure on a new task returning the result of the closure. + /// + /// This returns the configured [`EnvWithHandlerCfg`] for the given [`TransactionRequest`] at + /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. + fn spawn_with_call_at( + &self, + request: TransactionRequest, + at: BlockId, + overrides: EvmOverrides, + f: F, + ) -> impl Future> + Send + where + Self: LoadPendingBlock, + F: FnOnce(StateCacheDbRefMutWrapper<'_, '_>, EnvWithHandlerCfg) -> EthResult + + Send + + 'static, + R: Send + 'static, + { + async move { + let (cfg, block_env, at) = self.evm_env_at(at).await?; + let this = self.clone(); + self.spawn_tracing(move |_| { + let state = this.state_at_block_id(at)?; + let mut db = + CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); + + let env = prepare_call_env( + cfg, + block_env, + request, + this.call_gas_limit(), + &mut db, + overrides, + )?; + + f(StateCacheDbRefMutWrapper(&mut db), env) + }) + .await + .map_err(|_| EthApiError::InternalBlockingTaskError) + } + } + + /// Retrieves the transaction if it exists and executes it. 
+ /// + /// Before the transaction is executed, all previous transaction in the block are applied to the + /// state by executing them first. + /// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed + /// and the database that points to the beginning of the transaction. + /// + /// Note: Implementers should use a threadpool where blocking is allowed, such as + /// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool). + fn spawn_replay_transaction( + &self, + hash: B256, + f: F, + ) -> impl Future>> + Send + where + Self: LoadBlock + LoadPendingBlock + LoadTransaction, + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> EthResult + + Send + + 'static, + R: Send + 'static, + { + async move { + let (transaction, block) = match self.transaction_and_block(hash).await? { + None => return Ok(None), + Some(res) => res, + }; + let (tx, tx_info) = transaction.split(); + + let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?; + + // we need to get the state of the parent block because we're essentially replaying the + // block the transaction is included in + let parent_block = block.parent_hash; + let block_txs = block.into_transactions_ecrecovered(); + + let this = self.clone(); + self.spawn_with_state_at_block(parent_block.into(), move |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // replay all transactions prior to the targeted transaction + this.replay_transactions_until( + &mut db, + cfg.clone(), + block_env.clone(), + block_txs, + tx.hash, + )?; + + let env = EnvWithHandlerCfg::new_with_cfg_env( + cfg, + block_env, + Call::evm_config(&this).tx_env(&tx), + ); + + let (res, _) = this.transact(&mut db, env)?; + f(tx_info, res, db) + }) + .await + .map(Some) + } + } + + /// Replays all the transactions until the target transaction is found. 
+ /// + /// All transactions before the target transaction are executed and their changes are written to + /// the _runtime_ db ([`CacheDB`]). + /// + /// Note: This assumes the target transaction is in the given iterator. + /// Returns the index of the target transaction in the given iterator. + fn replay_transactions_until( + &self, + db: &mut CacheDB, + cfg: CfgEnvWithHandlerCfg, + block_env: BlockEnv, + transactions: impl IntoIterator, + target_tx_hash: B256, + ) -> Result + where + DB: DatabaseRef, + EthApiError: From<::Error>, + { + let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); + + let mut evm = self.evm_config().evm_with_env(db, env); + let mut index = 0; + for tx in transactions { + if tx.hash() == target_tx_hash { + // reached the target transaction + break + } + + let sender = tx.signer(); + self.evm_config().fill_tx_env(evm.tx_mut(), &tx.into_signed(), sender); + evm.transact_commit()?; + index += 1; + } + Ok(index) + } + + /// Estimate gas needed for execution of the `request` at the [`BlockId`]. + fn estimate_gas_at( + &self, + request: TransactionRequest, + at: BlockId, + state_override: Option, + ) -> impl Future> + Send + where + Self: LoadPendingBlock, + { + async move { + let (cfg, block_env, at) = self.evm_env_at(at).await?; + + self.spawn_blocking_io(move |this| { + let state = this.state_at_block_id(at)?; + this.estimate_gas_with(cfg, block_env, request, state, state_override) + }) + .await + } + } + + /// Estimates the gas usage of the `request` with the state. 
+ /// + /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search + fn estimate_gas_with( + &self, + mut cfg: CfgEnvWithHandlerCfg, + block: BlockEnv, + request: TransactionRequest, + state: S, + state_override: Option, + ) -> EthResult + where + S: StateProvider, + { + // Disabled because eth_estimateGas is sometimes used with eoa senders + // See + cfg.disable_eip3607 = true; + + // The basefee should be ignored for eth_createAccessList + // See: + // + cfg.disable_base_fee = true; + + // Keep a copy of gas related request values + let tx_request_gas_limit = request.gas; + let tx_request_gas_price = request.gas_price; + let block_env_gas_limit = block.gas_limit; + + // Determine the highest possible gas limit, considering both the request's specified limit + // and the block's limit. + let mut highest_gas_limit = tx_request_gas_limit + .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) + .unwrap_or(block_env_gas_limit); + + // Configure the evm env + let mut env = build_call_evm_env(cfg, block, request)?; + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // Apply any state overrides if specified. + if let Some(state_override) = state_override { + apply_state_overrides(state_override, &mut db)?; + } + + // Optimize for simple transfer transactions, potentially reducing the gas estimate. + if env.tx.data.is_empty() { + if let TransactTo::Call(to) = env.tx.transact_to { + if let Ok(code) = db.db.account_code(to) { + let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); + if no_code_callee { + // If the tx is a simple transfer (call to an account with no code) we can + // shortcircuit. But simply returning + // `MIN_TRANSACTION_GAS` is dangerous because there might be additional + // field combos that bump the price up, so we try executing the function + // with the minimum gas limit to make sure. 
+ let mut env = env.clone(); + env.tx.gas_limit = MIN_TRANSACTION_GAS; + if let Ok((res, _)) = self.transact(&mut db, env) { + if res.result.is_success() { + return Ok(U256::from(MIN_TRANSACTION_GAS)) + } + } + } + } + } + } + + // Check funds of the sender (only useful to check if transaction gas price is more than 0). + // + // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` + if env.tx.gas_price > U256::ZERO { + // cap the highest gas limit by max gas caller can afford with given gas price + highest_gas_limit = highest_gas_limit.min(caller_gas_allowance(&mut db, &env.tx)?); + } + + // We can now normalize the highest gas limit to a u64 + let mut highest_gas_limit: u64 = highest_gas_limit.try_into().unwrap_or(u64::MAX); + + // If the provided gas limit is less than computed cap, use that + env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); + + trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); + + // Execute the transaction with the highest possible gas limit. + let (mut res, mut env) = match self.transact(&mut db, env.clone()) { + // Handle the exceptional case where the transaction initialization uses too much gas. + // If the gas price or gas limit was specified in the request, retry the transaction + // with the block's gas limit to determine if the failure was due to + // insufficient gas. + Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) + if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() => + { + return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + } + // Propagate other results (successful or other errors). + ethres => ethres?, + }; + + let gas_refund = match res.result { + ExecutionResult::Success { gas_refunded, .. 
} => gas_refunded, + ExecutionResult::Halt { reason, gas_used } => { + // here we don't check for invalid opcode because already executed with highest gas + // limit + return Err(RpcInvalidTransactionError::halt(reason, gas_used).into()) + } + ExecutionResult::Revert { output, .. } => { + // if price or limit was included in the request then we can execute the request + // again with the block's gas limit to check if revert is gas related or not + return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { + Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + } else { + // the transaction did revert + Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into()) + } + } + }; + + // At this point we know the call succeeded but want to find the _best_ (lowest) gas the + // transaction succeeds with. We find this by doing a binary search over the possible range. + // + // NOTE: this is the gas the transaction used, which is less than the + // transaction requires to succeed. + let mut gas_used = res.result.gas_used(); + // the lowest value is capped by the gas used by the unconstrained transaction + let mut lowest_gas_limit = gas_used.saturating_sub(1); + + // As stated in Geth, there is a good chance that the transaction will pass if we set the + // gas limit to the execution gas used plus the gas refund, so we check this first + // 1 { + // An estimation error is allowed once the current gas limit range used in the binary + // search is small enough (less than 1.5% of the highest gas limit) + // { + // Increase the lowest gas limit if gas is too high + lowest_gas_limit = mid_gas_limit; + } + // Handle other cases, including successful transactions. + ethres => { + // Unpack the result and environment if the transaction was successful. + (res, env) = ethres?; + // Update the estimated gas range based on the transaction result. 
+ self.update_estimated_gas_range( + res.result, + mid_gas_limit, + &mut highest_gas_limit, + &mut lowest_gas_limit, + )?; + } + } + + // New midpoint + mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64; + } + + Ok(U256::from(highest_gas_limit)) + } + + /// Updates the highest and lowest gas limits for binary search based on the execution result. + /// + /// This function refines the gas limit estimates used in a binary search to find the optimal + /// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on + /// whether the execution succeeded, reverted, or halted due to specific reasons. + #[inline] + fn update_estimated_gas_range( + &self, + result: ExecutionResult, + tx_gas_limit: u64, + highest_gas_limit: &mut u64, + lowest_gas_limit: &mut u64, + ) -> EthResult<()> { + match result { + ExecutionResult::Success { .. } => { + // Cap the highest gas limit with the succeeding gas limit. + *highest_gas_limit = tx_gas_limit; + } + ExecutionResult::Revert { .. } => { + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + ExecutionResult::Halt { reason, .. } => { + match reason { + HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { + // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas + // left is too low. Treat this as an out of gas + // condition, knowing that the call succeeds with a + // higher gas limit. + // + // Common usage of invalid opcode in OpenZeppelin: + // + + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + err => { + // These cases should be unreachable because we know the transaction + // succeeds, but if they occur, treat them as an + // error. 
+ return Err(RpcInvalidTransactionError::EvmHalt(err).into()) + } + } + } + }; + + Ok(()) + } + + /// Executes the requests again after an out of gas error to check if the error is gas related + /// or not + #[inline] + fn map_out_of_gas_err( + &self, + env_gas_limit: U256, + mut env: EnvWithHandlerCfg, + db: &mut CacheDB>, + ) -> EthApiError + where + S: StateProvider, + { + let req_gas_limit = env.tx.gas_limit; + env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); + let (res, _) = match self.transact(db, env) { + Ok(res) => res, + Err(err) => return err, + }; + match res.result { + ExecutionResult::Success { .. } => { + // transaction succeeded by manually increasing the gas limit to + // highest, which means the caller lacks funds to pay for the tx + RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into() + } + ExecutionResult::Revert { output, .. } => { + // reverted again after bumping the limit + RpcInvalidTransactionError::Revert(RevertError::new(output)).into() + } + ExecutionResult::Halt { reason, .. } => { + RpcInvalidTransactionError::EvmHalt(reason).into() + } + } + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs new file mode 100644 index 000000000000..54c577ea2504 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -0,0 +1,346 @@ +//! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods. 
+ +use futures::Future; +use reth_primitives::U256; +use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; +use reth_rpc_eth_types::{ + fee_history::calculate_reward_percentiles_for_block, EthApiError, EthResult, EthStateCache, + FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, +}; +use reth_rpc_types::{BlockNumberOrTag, FeeHistory}; +use tracing::debug; + +use super::LoadBlock; + +/// Fee related functions for the [`EthApiServer`](crate::EthApiServer) trait in the +/// `eth_` namespace. +pub trait EthFees: LoadFee { + /// Returns a suggestion for a gas price for legacy transactions. + /// + /// See also: + fn gas_price(&self) -> impl Future> + Send + where + Self: LoadBlock, + { + LoadFee::gas_price(self) + } + + /// Returns a suggestion for a base fee for blob transactions. + fn blob_base_fee(&self) -> impl Future> + Send + where + Self: LoadBlock, + { + LoadFee::blob_base_fee(self) + } + + /// Returns a suggestion for the priority fee (the tip) + fn suggested_priority_fee(&self) -> impl Future> + Send + where + Self: 'static, + { + LoadFee::suggested_priority_fee(self) + } + + /// Reports the fee history, for the given amount of blocks, up until the given newest block. + /// + /// If `reward_percentiles` are provided the [`FeeHistory`] will include the _approximated_ + /// rewards for the requested range. 
+ fn fee_history( + &self, + mut block_count: u64, + newest_block: BlockNumberOrTag, + reward_percentiles: Option>, + ) -> impl Future> + Send { + async move { + if block_count == 0 { + return Ok(FeeHistory::default()) + } + + // See https://github.com/ethereum/go-ethereum/blob/2754b197c935ee63101cbbca2752338246384fec/eth/gasprice/feehistory.go#L218C8-L225 + let max_fee_history = if reward_percentiles.is_none() { + self.gas_oracle().config().max_header_history + } else { + self.gas_oracle().config().max_block_history + }; + + if block_count > max_fee_history { + debug!( + requested = block_count, + truncated = max_fee_history, + "Sanitizing fee history block count" + ); + block_count = max_fee_history + } + + let Some(end_block) = + LoadFee::provider(self).block_number_for_id(newest_block.into())? + else { + return Err(EthApiError::UnknownBlockNumber) + }; + + // need to add 1 to the end block to get the correct (inclusive) range + let end_block_plus = end_block + 1; + // Ensure that we would not be querying outside of genesis + if end_block_plus < block_count { + block_count = end_block_plus; + } + + // If reward percentiles were specified, we + // need to validate that they are monotonically + // increasing and 0 <= p <= 100 + // Note: The types used ensure that the percentiles are never < 0 + if let Some(percentiles) = &reward_percentiles { + if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
{ + return Err(EthApiError::InvalidRewardPercentiles) + } + } + + // Fetch the headers and ensure we got all of them + // + // Treat a request for 1 block as a request for `newest_block..=newest_block`, + // otherwise `newest_block - 2 + // NOTE: We ensured that block count is capped + let start_block = end_block_plus - block_count; + + // Collect base fees, gas usage ratios and (optionally) reward percentile data + let mut base_fee_per_gas: Vec = Vec::new(); + let mut gas_used_ratio: Vec = Vec::new(); + + let mut base_fee_per_blob_gas: Vec = Vec::new(); + let mut blob_gas_used_ratio: Vec = Vec::new(); + + let mut rewards: Vec> = Vec::new(); + + // Check if the requested range is within the cache bounds + let fee_entries = self.fee_history_cache().get_history(start_block, end_block).await; + + if let Some(fee_entries) = fee_entries { + if fee_entries.len() != block_count as usize { + return Err(EthApiError::InvalidBlockRange) + } + + for entry in &fee_entries { + base_fee_per_gas.push(entry.base_fee_per_gas as u128); + gas_used_ratio.push(entry.gas_used_ratio); + base_fee_per_blob_gas.push(entry.base_fee_per_blob_gas.unwrap_or_default()); + blob_gas_used_ratio.push(entry.blob_gas_used_ratio); + + if let Some(percentiles) = &reward_percentiles { + let mut block_rewards = Vec::with_capacity(percentiles.len()); + for &percentile in percentiles { + block_rewards.push(self.approximate_percentile(entry, percentile)); + } + rewards.push(block_rewards); + } + } + let last_entry = fee_entries.last().expect("is not empty"); + + // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the + // next block + base_fee_per_gas + .push(last_entry.next_block_base_fee(&LoadFee::provider(self).chain_spec()) + as u128); + + base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); + } else { + // read the requested header range + let headers = LoadFee::provider(self).sealed_headers_range(start_block..=end_block)?; + if headers.len() != 
block_count as usize { + return Err(EthApiError::InvalidBlockRange) + } + + for header in &headers { + base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); + gas_used_ratio.push(header.gas_used as f64 / header.gas_limit as f64); + base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default()); + blob_gas_used_ratio.push( + header.blob_gas_used.unwrap_or_default() as f64 / + reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + ); + + // Percentiles were specified, so we need to collect reward percentile ino + if let Some(percentiles) = &reward_percentiles { + let (transactions, receipts) = LoadFee::cache(self) + .get_transactions_and_receipts(header.hash()) + .await? + .ok_or(EthApiError::InvalidBlockRange)?; + rewards.push( + calculate_reward_percentiles_for_block( + percentiles, + header.gas_used, + header.base_fee_per_gas.unwrap_or_default(), + &transactions, + &receipts, + ) + .unwrap_or_default(), + ); + } + } + + // The spec states that `base_fee_per_gas` "[..] includes the next block after the + // newest of the returned range, because this value can be derived from the + // newest block" + // + // The unwrap is safe since we checked earlier that we got at least 1 header. + let last_header = headers.last().expect("is present"); + base_fee_per_gas.push( + LoadFee::provider(self).chain_spec().base_fee_params_at_timestamp(last_header.timestamp).next_block_base_fee( + last_header.gas_used as u128, + last_header.gas_limit as u128, + last_header.base_fee_per_gas.unwrap_or_default() as u128, + )); + + // Same goes for the `base_fee_per_blob_gas`: + // > "[..] includes the next block after the newest of the returned range, because this value can be derived from the newest block. 
+ base_fee_per_blob_gas + .push(last_header.next_block_blob_fee().unwrap_or_default()); + }; + + Ok(FeeHistory { + base_fee_per_gas, + gas_used_ratio, + base_fee_per_blob_gas, + blob_gas_used_ratio, + oldest_block: start_block, + reward: reward_percentiles.map(|_| rewards), + }) + } + } + + /// Approximates reward at a given percentile for a specific block + /// Based on the configured resolution + fn approximate_percentile(&self, entry: &FeeHistoryEntry, requested_percentile: f64) -> u128 { + let resolution = self.fee_history_cache().resolution(); + let rounded_percentile = + (requested_percentile * resolution as f64).round() / resolution as f64; + let clamped_percentile = rounded_percentile.clamp(0.0, 100.0); + + // Calculate the index in the precomputed rewards array + let index = (clamped_percentile / (1.0 / resolution as f64)).round() as usize; + // Fetch the reward from the FeeHistoryEntry + entry.rewards.get(index).cloned().unwrap_or_default() + } +} + +/// Loads fee from database. +/// +/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods. +pub trait LoadFee: LoadBlock { + // Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider(&self) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider; + + /// Returns a handle for reading data from memory. + /// + /// Data access in default (L1) trait method implementations. + fn cache(&self) -> &EthStateCache; + + /// Returns a handle for reading gas price. + /// + /// Data access in default (L1) trait method implementations. + fn gas_oracle(&self) -> &GasPriceOracle; + + /// Returns a handle for reading fee history data from memory. + /// + /// Data access in default (L1) trait method implementations. + fn fee_history_cache(&self) -> &FeeHistoryCache; + + /// Returns the gas price if it is set, otherwise fetches a suggested gas price for legacy + /// transactions. 
+ fn legacy_gas_price( + &self, + gas_price: Option, + ) -> impl Future> + Send { + async move { + match gas_price { + Some(gas_price) => Ok(gas_price), + None => { + // fetch a suggested gas price + self.gas_price().await + } + } + } + } + + /// Returns the EIP-1559 fees if they are set, otherwise fetches a suggested gas price for + /// EIP-1559 transactions. + /// + /// Returns (`max_fee`, `priority_fee`) + fn eip1559_fees( + &self, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + ) -> impl Future> + Send { + async move { + let max_fee_per_gas = match max_fee_per_gas { + Some(max_fee_per_gas) => max_fee_per_gas, + None => { + // fetch pending base fee + let base_fee = self + .block(BlockNumberOrTag::Pending.into()) + .await? + .ok_or(EthApiError::UnknownBlockNumber)? + .base_fee_per_gas + .ok_or_else(|| { + EthApiError::InvalidTransaction( + RpcInvalidTransactionError::TxTypeNotSupported, + ) + })?; + U256::from(base_fee) + } + }; + + let max_priority_fee_per_gas = match max_priority_fee_per_gas { + Some(max_priority_fee_per_gas) => max_priority_fee_per_gas, + None => self.suggested_priority_fee().await?, + }; + Ok((max_fee_per_gas, max_priority_fee_per_gas)) + } + } + + /// Returns the EIP-4844 blob fee if it is set, otherwise fetches a blob fee. + fn eip4844_blob_fee( + &self, + blob_fee: Option, + ) -> impl Future> + Send { + async move { + match blob_fee { + Some(blob_fee) => Ok(blob_fee), + None => self.blob_base_fee().await, + } + } + } + + /// Returns a suggestion for a gas price for legacy transactions. 
+ /// + /// See also: + fn gas_price(&self) -> impl Future> + Send { + let header = self.block(BlockNumberOrTag::Latest.into()); + let suggested_tip = self.suggested_priority_fee(); + async move { + let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?; + let base_fee = header.and_then(|h| h.base_fee_per_gas).unwrap_or_default(); + Ok(suggested_tip + U256::from(base_fee)) + } + } + + /// Returns a suggestion for a base fee for blob transactions. + fn blob_base_fee(&self) -> impl Future> + Send { + async move { + self.block(BlockNumberOrTag::Latest.into()) + .await? + .and_then(|h: reth_primitives::SealedBlock| h.next_block_blob_fee()) + .ok_or(EthApiError::ExcessBlobGasNotSet) + .map(U256::from) + } + } + + /// Returns a suggestion for the priority fee (the tip) + fn suggested_priority_fee(&self) -> impl Future> + Send + where + Self: 'static, + { + self.gas_oracle().suggest_tip_cap() + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs new file mode 100644 index 000000000000..72e49077efb8 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -0,0 +1,67 @@ +//! Behaviour needed to serve `eth_` RPC requests, divided into general database reads and +//! specific database access. +//! +//! Traits with `Load` prefix, read atomic data from database, e.g. a block or transaction. Any +//! database read done in more than one default `Eth` trait implementation, is defined in a `Load` +//! trait. +//! +//! Traits with `Eth` prefix, compose specific data needed to serve RPC requests in the `eth` +//! namespace. They use `Load` traits as building blocks. [`EthTransactions`] also writes data +//! (submits transactions). Based on the `eth_` request method semantics, request methods are +//! divided into: [`EthTransactions`], [`EthBlocks`], [`EthFees`], [`EthState`] and [`EthCall`]. +//! Default implementation of the `Eth` traits, is done w.r.t. L1. +//! +//! 
[`EthApiServer`](crate::EthApiServer), is implemented for any type that implements +//! all the `Eth` traits, e.g. `reth_rpc::EthApi`. + +pub mod block; +pub mod blocking_task; +pub mod call; +pub mod fee; +pub mod pending_block; +pub mod receipt; +pub mod signer; +pub mod spec; +pub mod state; +pub mod trace; +pub mod transaction; + +pub use block::{EthBlocks, LoadBlock}; +pub use blocking_task::SpawnBlocking; +pub use call::{Call, EthCall}; +pub use fee::{EthFees, LoadFee}; +pub use pending_block::LoadPendingBlock; +pub use receipt::LoadReceipt; +pub use signer::EthSigner; +pub use spec::EthApiSpec; +pub use state::{EthState, LoadState}; +pub use trace::Trace; +pub use transaction::{EthTransactions, LoadTransaction, UpdateRawTxForwarder}; + +/// Extension trait that bundles traits needed for tracing transactions. +pub trait TraceExt: + LoadTransaction + LoadBlock + LoadPendingBlock + SpawnBlocking + Trace + Call +{ +} + +impl TraceExt for T where T: LoadTransaction + LoadBlock + LoadPendingBlock + Trace + Call {} + +/// Helper trait to unify all `eth` rpc server building block traits, for simplicity. +/// +/// This trait is automatically implemented for any type that implements all the `Eth` traits. +pub trait FullEthApi: + EthApiSpec + EthTransactions + EthBlocks + EthState + EthCall + EthFees + Trace + LoadReceipt +{ +} + +impl FullEthApi for T where + T: EthApiSpec + + EthTransactions + + EthBlocks + + EthState + + EthCall + + EthFees + + Trace + + LoadReceipt +{ +} diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs similarity index 52% rename from crates/rpc/rpc/src/eth/api/pending_block.rs rename to crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index be42d3b76b45..20b34b8c1d66 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -1,61 +1,209 @@ -//! Support for building a pending block via local txpool. +//! 
Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace +//! RPC methods. -use crate::eth::error::{EthApiError, EthResult}; -use reth_errors::ProviderError; +use std::time::{Duration, Instant}; + +use futures::Future; +use reth_chainspec::EthereumHardforks; +use reth_evm::{system_calls::pre_block_beacon_root_contract_call, ConfigureEvm, ConfigureEvmEnv}; +use reth_execution_types::ExecutionOutcome; use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_ROOT_HASH}, - proofs, - revm::env::tx_env_with_recovered, + proofs::calculate_transaction_root, revm_primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, EVMError, Env, InvalidTransaction, ResultAndState, SpecId, + BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, + ResultAndState, SpecId, }, - Block, BlockId, BlockNumberOrTag, ChainSpec, Header, IntoRecoveredTransaction, Receipt, - Requests, SealedBlockWithSenders, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, + Block, BlockNumber, Header, IntoRecoveredTransaction, Receipt, Requests, + SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, B256, + EMPTY_OMMER_ROOT_HASH, U256, +}; +use reth_provider::{ + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, }; -use reth_provider::{ChainSpecProvider, ExecutionOutcome, StateProviderFactory}; use reth_revm::{ - database::StateProviderDatabase, - state_change::{ - apply_beacon_root_contract_call, apply_blockhashes_update, - post_block_withdrawals_balance_increments, - }, + database::StateProviderDatabase, state_change::post_block_withdrawals_balance_increments, +}; +use reth_rpc_eth_types::{ + pending_block::pre_block_blockhashes_update, EthApiError, EthResult, PendingBlock, + PendingBlockEnv, PendingBlockEnvOrigin, }; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; -use revm::{db::states::bundle_state::BundleRetention, Database, 
DatabaseCommit, State}; -use revm_primitives::EnvWithHandlerCfg; -use std::time::Instant; - -/// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block -#[derive(Debug, Clone)] -pub(crate) struct PendingBlockEnv { - /// Configured [`CfgEnvWithHandlerCfg`] for the pending block. - pub(crate) cfg: CfgEnvWithHandlerCfg, - /// Configured [`BlockEnv`] for the pending block. - pub(crate) block_env: BlockEnv, - /// Origin block for the config - pub(crate) origin: PendingBlockEnvOrigin, -} +use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; +use tokio::sync::Mutex; +use tracing::debug; + +use super::SpawnBlocking; + +/// Loads a pending block from database. +/// +/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. +#[auto_impl::auto_impl(&, Arc)] +pub trait LoadPendingBlock { + /// Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider( + &self, + ) -> impl BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory; + + /// Returns a handle for reading data from transaction pool. + /// + /// Data access in default (L1) trait method implementations. + fn pool(&self) -> impl TransactionPool; + + /// Returns a handle to the pending block. + /// + /// Data access in default (L1) trait method implementations. + fn pending_block(&self) -> &Mutex>; + + /// Returns a handle for reading evm config. + /// + /// Data access in default (L1) trait method implementations. + fn evm_config(&self) -> &impl ConfigureEvm; + + /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block + /// + /// If no pending block is available, this will derive it from the `latest` block + fn pending_block_env_and_cfg(&self) -> EthResult { + let origin: PendingBlockEnvOrigin = if let Some(pending) = + self.provider().pending_block_with_senders()? 
+ { + PendingBlockEnvOrigin::ActualPending(pending) + } else { + // no pending block from the CL yet, so we use the latest block and modify the env + // values that we can + let latest = + self.provider().latest_header()?.ok_or_else(|| EthApiError::UnknownBlockNumber)?; + + let (mut latest_header, block_hash) = latest.split(); + // child block + latest_header.number += 1; + // assumed child block is in the next slot: 12s + latest_header.timestamp += 12; + // base fee of the child block + let chain_spec = self.provider().chain_spec(); + + latest_header.base_fee_per_gas = latest_header.next_block_base_fee( + chain_spec.base_fee_params_at_timestamp(latest_header.timestamp), + ); + + // update excess blob gas consumed above target + latest_header.excess_blob_gas = latest_header.next_block_excess_blob_gas(); + + // we're reusing the same block hash because we need this to lookup the block's state + let latest = SealedHeader::new(latest_header, block_hash); + + PendingBlockEnvOrigin::DerivedFromLatest(latest) + }; + + let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); + + let mut block_env = BlockEnv::default(); + // Note: for the PENDING block we assume it is past the known merge block and thus this will + // not fail when looking up the total difficulty value for the blockenv. 
+ self.provider().fill_env_with_header( + &mut cfg, + &mut block_env, + origin.header(), + self.evm_config().clone(), + )?; + + Ok(PendingBlockEnv::new(cfg, block_env, origin)) + } + + /// Returns the locally built pending block + fn local_pending_block( + &self, + ) -> impl Future>> + Send + where + Self: SpawnBlocking, + { + async move { + let pending = self.pending_block_env_and_cfg()?; + if pending.origin.is_actual_pending() { + return Ok(pending.origin.into_actual_pending()) + } + + let mut lock = self.pending_block().lock().await; + + let now = Instant::now(); + + // check if the block is still good + if let Some(pending_block) = lock.as_ref() { + // this is guaranteed to be the `latest` header + if pending.block_env.number.to::() == pending_block.block.number && + pending.origin.header().hash() == pending_block.block.parent_hash && + now <= pending_block.expires_at + { + return Ok(Some(pending_block.block.clone())) + } + } + + // no pending block from the CL yet, so we need to build it ourselves via txpool + let pending_block = match self + .spawn_blocking_io(move |this| { + // we rebuild the block + this.build_block(pending) + }) + .await + { + Ok(block) => block, + Err(err) => { + debug!(target: "rpc", "Failed to build pending block: {:?}", err); + return Ok(None) + } + }; + + let now = Instant::now(); + *lock = Some(PendingBlock::new(pending_block.clone(), now + Duration::from_secs(1))); + + Ok(Some(pending_block)) + } + } + + /// Assembles a [`Receipt`] for a transaction, based on its [`ExecutionResult`]. 
+ fn assemble_receipt( + &self, + tx: &TransactionSignedEcRecovered, + result: ExecutionResult, + cumulative_gas_used: u64, + ) -> Receipt { + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + } + } -impl PendingBlockEnv { - /// Builds a pending block using the given client and pool. + /// Calculates receipts root in block building. + /// + /// Panics if block is not in the [`ExecutionOutcome`]'s block range. + fn receipts_root( + &self, + _block_env: &BlockEnv, + execution_outcome: &ExecutionOutcome, + block_number: BlockNumber, + ) -> B256 { + execution_outcome.receipts_root_slow(block_number).expect("Block is present") + } + + /// Builds a pending block using the configured provider and pool. /// /// If the origin is the actual pending block, the block is built with withdrawals. /// /// After Cancun, if the origin is the actual pending block, the block includes the EIP-4788 pre /// block contract call using the parent beacon block root received from the CL. 
- pub(crate) fn build_block( - self, - client: &Client, - pool: &Pool, - ) -> EthResult - where - Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, - { - let Self { cfg, block_env, origin } = self; + fn build_block(&self, env: PendingBlockEnv) -> EthResult { + let PendingBlockEnv { cfg, block_env, origin } = env; let parent_hash = origin.build_target_hash(); - let state_provider = client.history_by_block_hash(parent_hash)?; + let state_provider = self.provider().history_by_block_hash(parent_hash)?; let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database(state).with_bundle_update().build(); @@ -67,10 +215,11 @@ impl PendingBlockEnv { let mut executed_txs = Vec::new(); let mut senders = Vec::new(); - let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( - base_fee, - block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), - )); + let mut best_txs = + self.pool().best_transactions_with_attributes(BestTransactionsAttributes::new( + base_fee, + block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), + )); let (withdrawals, withdrawals_root) = match origin { PendingBlockEnvOrigin::ActualPending(ref block) => { @@ -79,19 +228,22 @@ impl PendingBlockEnv { PendingBlockEnvOrigin::DerivedFromLatest(_) => (None, None), }; - let chain_spec = client.chain_spec(); + let chain_spec = self.provider().chain_spec(); let parent_beacon_block_root = if origin.is_actual_pending() { // apply eip-4788 pre block contract call if we got the block from the CL with the real // parent beacon block root pre_block_beacon_root_contract_call( &mut db, + self.evm_config(), chain_spec.as_ref(), - block_number, &cfg, &block_env, + block_number, + block_env.timestamp.to::(), origin.header().parent_beacon_block_root, - )?; + ) + .map_err(|err| EthApiError::Internal(err.into()))?; origin.header().parent_beacon_block_root } else { None @@ -142,8 +294,11 @@ impl PendingBlockEnv { } // 
Configure the environment for the block. - let env = - Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx_env_with_recovered(&tx)); + let env = Env::boxed( + cfg.cfg_env.clone(), + block_env.clone(), + Self::evm_config(self).tx_env(&tx), + ); let mut evm = revm::Evm::builder().with_env(env).with_db(&mut db).build(); @@ -190,16 +345,7 @@ impl PendingBlockEnv { cumulative_gas_used += gas_used; // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Some(Receipt { - tx_type: tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs().into_iter().map(Into::into).collect(), - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })); + receipts.push(Some(self.assemble_receipt(&tx, result, cumulative_gas_used))); // append transaction to the list of executed transactions let (tx, sender) = tx.to_components(); @@ -227,18 +373,7 @@ impl PendingBlockEnv { Vec::new(), ); - #[cfg(feature = "optimism")] - let receipts_root = execution_outcome - .optimism_receipts_root_slow( - block_number, - chain_spec.as_ref(), - block_env.timestamp.to::(), - ) - .expect("Block is present"); - - #[cfg(not(feature = "optimism"))] - let receipts_root = - execution_outcome.receipts_root_slow(block_number).expect("Block is present"); + let receipts_root = self.receipts_root(&block_env, &execution_outcome, block_number); let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Block is present"); @@ -248,7 +383,7 @@ impl PendingBlockEnv { let state_root = state_provider.state_root(execution_outcome.state())?; // create the block header - let transactions_root = proofs::calculate_transaction_root(&executed_txs); + let transactions_root = calculate_transaction_root(&executed_txs); // check if cancun is activated to set eip4844 header fields correctly let blob_gas_used = @@ -292,137 +427,3 @@ impl PendingBlockEnv { Ok(SealedBlockWithSenders { 
block: block.seal_slow(), senders }) } } - -/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. -/// -/// This constructs a new [Evm](revm::Evm) with the given DB, and environment -/// [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] to execute the pre block contract call. -/// -/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state -/// change. -fn pre_block_beacon_root_contract_call( - db: &mut DB, - chain_spec: &ChainSpec, - block_number: u64, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, - parent_beacon_block_root: Option, -) -> EthResult<()> -where - DB::Error: std::fmt::Display, -{ - // apply pre-block EIP-4788 contract call - let mut evm_pre_block = revm::Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the pre block call needs the block itself - apply_beacon_root_contract_call( - chain_spec, - initialized_block_env.timestamp.to::(), - block_number, - parent_beacon_block_root, - &mut evm_pre_block, - ) - .map_err(|err| EthApiError::Internal(err.into())) -} - -/// Apply the [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) pre block state transitions. -/// -/// This constructs a new [Evm](revm::Evm) with the given DB, and environment -/// [`CfgEnvWithHandlerCfg`] and [`BlockEnv`]. -/// -/// This uses [`apply_blockhashes_update`]. 
-fn pre_block_blockhashes_update + DatabaseCommit>( - db: &mut DB, - chain_spec: &ChainSpec, - initialized_block_env: &BlockEnv, - block_number: u64, - parent_block_hash: B256, -) -> EthResult<()> -where - DB::Error: std::fmt::Display, -{ - apply_blockhashes_update( - db, - chain_spec, - initialized_block_env.timestamp.to::(), - block_number, - parent_block_hash, - ) - .map_err(|err| EthApiError::Internal(err.into())) -} - -/// The origin for a configured [`PendingBlockEnv`] -#[derive(Clone, Debug)] -pub(crate) enum PendingBlockEnvOrigin { - /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders), - /// The _modified_ header of the latest block. - /// - /// This derives the pending state based on the latest header by modifying: - /// - the timestamp - /// - the block number - /// - fees - DerivedFromLatest(SealedHeader), -} - -impl PendingBlockEnvOrigin { - /// Returns true if the origin is the actual pending block as received from the CL. - pub(crate) const fn is_actual_pending(&self) -> bool { - matches!(self, Self::ActualPending(_)) - } - - /// Consumes the type and returns the actual pending block. - pub(crate) fn into_actual_pending(self) -> Option { - match self { - Self::ActualPending(block) => Some(block), - _ => None, - } - } - - /// Returns the [`BlockId`] that represents the state of the block. - /// - /// If this is the actual pending block, the state is the "Pending" tag, otherwise we can safely - /// identify the block by its hash (latest block). - pub(crate) fn state_block_id(&self) -> BlockId { - match self { - Self::ActualPending(_) => BlockNumberOrTag::Pending.into(), - Self::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), - } - } - - /// Returns the hash of the block the pending block should be built on. - /// - /// For the [`PendingBlockEnvOrigin::ActualPending`] this is the parent hash of the block. 
- /// For the [`PendingBlockEnvOrigin::DerivedFromLatest`] this is the hash of the _latest_ - /// header. - fn build_target_hash(&self) -> B256 { - match self { - Self::ActualPending(block) => block.parent_hash, - Self::DerivedFromLatest(header) => header.hash(), - } - } - - /// Returns the header this pending block is based on. - pub(crate) fn header(&self) -> &SealedHeader { - match self { - Self::ActualPending(block) => &block.header, - Self::DerivedFromLatest(header) => header, - } - } -} - -/// In memory pending block for `pending` tag -#[derive(Debug)] -pub(crate) struct PendingBlock { - /// The cached pending block - pub(crate) block: SealedBlockWithSenders, - /// Timestamp when the pending block is considered outdated - pub(crate) expires_at: Instant, -} diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs new file mode 100644 index 000000000000..5cd6c03c4d9f --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -0,0 +1,36 @@ +//! Loads a receipt from database. Helper trait for `eth_` block and transaction RPC methods, that +//! loads receipt data w.r.t. network. + +use futures::Future; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; +use reth_rpc_types::AnyTransactionReceipt; + +/// Assembles transaction receipt data w.r.t to network. +/// +/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. +#[auto_impl::auto_impl(&, Arc)] +pub trait LoadReceipt: Send + Sync { + /// Returns a handle for reading data from memory. + /// + /// Data access in default (L1) trait method implementations. + fn cache(&self) -> &EthStateCache; + + /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. 
+ fn build_transaction_receipt( + &self, + tx: TransactionSigned, + meta: TransactionMeta, + receipt: Receipt, + ) -> impl Future> + Send { + async move { + // get all receipts for the block + let all_receipts = match self.cache().get_receipts(meta.block_hash).await? { + Some(recpts) => recpts, + None => return Err(EthApiError::UnknownBlockNumber), + }; + + Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) + } + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs new file mode 100644 index 000000000000..2a75d9abb02e --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -0,0 +1,39 @@ +//! An abstraction over ethereum signers. + +use std::result; + +use alloy_dyn_abi::TypedData; +use dyn_clone::DynClone; +use reth_primitives::{Address, Signature, TransactionSigned}; +use reth_rpc_eth_types::SignError; +use reth_rpc_types::TypedTransactionRequest; + +/// Result returned by [`EthSigner`] methods. +pub type Result = result::Result; + +/// An Ethereum Signer used via RPC. +#[async_trait::async_trait] +pub trait EthSigner: Send + Sync + DynClone { + /// Returns the available accounts for this signer. + fn accounts(&self) -> Vec
; + + /// Returns `true` whether this signer can sign for this address + fn is_signer_for(&self, addr: &Address) -> bool { + self.accounts().contains(addr) + } + + /// Returns the signature + async fn sign(&self, address: Address, message: &[u8]) -> Result; + + /// signs a transaction request using the given account in request + fn sign_transaction( + &self, + request: TypedTransactionRequest, + address: &Address, + ) -> Result; + + /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. + fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result; +} + +dyn_clone::clone_trait_object!(EthSigner); diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs new file mode 100644 index 000000000000..63722e376e64 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -0,0 +1,31 @@ +//! Loads chain metadata. + +use futures::Future; +use reth_chainspec::ChainInfo; +use reth_errors::RethResult; +use reth_primitives::{Address, U64}; +use reth_rpc_types::SyncStatus; + +/// `Eth` API trait. +/// +/// Defines core functionality of the `eth` API implementation. +#[auto_impl::auto_impl(&, Arc)] +pub trait EthApiSpec: Send + Sync { + /// Returns the current ethereum protocol version. + fn protocol_version(&self) -> impl Future> + Send; + + /// Returns the chain id + fn chain_id(&self) -> U64; + + /// Returns provider chain info + fn chain_info(&self) -> RethResult; + + /// Returns a list of addresses owned by provider. + fn accounts(&self) -> Vec
; + + /// Returns `true` if the network is undergoing sync. + fn is_syncing(&self) -> bool; + + /// Returns the [`SyncStatus`] of the network + fn sync_status(&self) -> RethResult; +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs new file mode 100644 index 000000000000..4b0d629259a9 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -0,0 +1,252 @@ +//! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace +//! RPC methods. + +use futures::Future; +use reth_errors::RethError; +use reth_evm::ConfigureEvmEnv; +use reth_primitives::{Address, BlockId, Bytes, Header, B256, U256}; +use reth_provider::{BlockIdReader, StateProvider, StateProviderBox, StateProviderFactory}; +use reth_rpc_eth_types::{ + EthApiError, EthResult, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError, +}; +use reth_rpc_types::{serde_helpers::JsonStorageKey, EIP1186AccountProofResponse}; +use reth_rpc_types_compat::proof::from_primitive_account_proof; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use revm::db::BundleState; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; + +use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; + +/// Helper methods for `eth_` methods relating to state (accounts). +pub trait EthState: LoadState + SpawnBlocking { + /// Returns the maximum number of blocks into the past for generating state proofs. + fn max_proof_window(&self) -> u64; + + /// Returns the number of transactions sent from an address at the given block identifier. + /// + /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// look up the highest transaction in pool and return the next nonce (highest + 1). 
+ fn transaction_count( + &self, + address: Address, + block_id: Option, + ) -> impl Future> + Send { + LoadState::transaction_count(self, address, block_id) + } + + /// Returns code of given account, at given blocknumber. + fn get_code( + &self, + address: Address, + block_id: Option, + ) -> impl Future> + Send { + self.spawn_blocking_io(move |this| { + Ok(this + .state_at_block_id_or_latest(block_id)? + .account_code(address)? + .unwrap_or_default() + .original_bytes()) + }) + } + + /// Returns balance of given account, at given blocknumber. + fn balance( + &self, + address: Address, + block_id: Option, + ) -> impl Future> + Send { + self.spawn_blocking_io(move |this| { + Ok(this + .state_at_block_id_or_latest(block_id)? + .account_balance(address)? + .unwrap_or_default()) + }) + } + + /// Returns values stored of given account, at given blocknumber. + fn storage_at( + &self, + address: Address, + index: JsonStorageKey, + block_id: Option, + ) -> impl Future> + Send { + self.spawn_blocking_io(move |this| { + Ok(B256::new( + this.state_at_block_id_or_latest(block_id)? + .storage(address, index.0)? + .unwrap_or_default() + .to_be_bytes(), + )) + }) + } + + /// Returns values stored of given account, with Merkle-proof, at given blocknumber. + fn get_proof( + &self, + address: Address, + keys: Vec, + block_id: Option, + ) -> EthResult> + Send> + where + Self: EthApiSpec, + { + let chain_info = self.chain_info()?; + let block_id = block_id.unwrap_or_default(); + + // Check whether the distance to the block exceeds the maximum configured window. + let block_number = self + .provider() + .block_number_for_id(block_id)? 
+ .ok_or(EthApiError::UnknownBlockNumber)?; + let max_window = self.max_proof_window(); + if chain_info.best_number.saturating_sub(block_number) > max_window { + return Err(EthApiError::ExceedsMaxProofWindow) + } + + Ok(async move { + let _permit = self + .acquire_owned() + .await + .map_err(|err| EthApiError::Internal(RethError::other(err)))?; + self.spawn_blocking_io(move |this| { + let state = this.state_at_block_id(block_id)?; + let storage_keys = keys.iter().map(|key| key.0).collect::>(); + let proof = state.proof(&BundleState::default(), address, &storage_keys)?; + Ok(from_primitive_account_proof(proof)) + }) + .await + }) + } +} + +/// Loads state from database. +/// +/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. +pub trait LoadState { + /// Returns a handle for reading state from database. + /// + /// Data access in default trait method implementations. + fn provider(&self) -> impl StateProviderFactory; + + /// Returns a handle for reading data from memory. + /// + /// Data access in default (L1) trait method implementations. + fn cache(&self) -> &EthStateCache; + + /// Returns a handle for reading data from transaction pool. + /// + /// Data access in default trait method implementations. + fn pool(&self) -> impl TransactionPool; + + /// Returns the state at the given block number + fn state_at_hash(&self, block_hash: B256) -> EthResult { + Ok(self.provider().history_by_block_hash(block_hash)?) + } + + /// Returns the state at the given [`BlockId`] enum. + /// + /// Note: if not [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this + /// will only return canonical state. See also + fn state_at_block_id(&self, at: BlockId) -> EthResult { + Ok(self.provider().state_by_block_id(at)?) + } + + /// Returns the _latest_ state + fn latest_state(&self) -> EthResult { + Ok(self.provider().latest()?) + } + + /// Returns the state at the given [`BlockId`] enum or the latest. 
+ /// + /// Convenience function to interprets `None` as `BlockId::Number(BlockNumberOrTag::Latest)` + fn state_at_block_id_or_latest( + &self, + block_id: Option, + ) -> EthResult { + if let Some(block_id) = block_id { + self.state_at_block_id(block_id) + } else { + Ok(self.latest_state()?) + } + } + + /// Returns the revm evm env for the requested [`BlockId`] + /// + /// If the [`BlockId`] this will return the [`BlockId`] of the block the env was configured + /// for. + /// If the [`BlockId`] is pending, this will return the "Pending" tag, otherwise this returns + /// the hash of the exact block. + fn evm_env_at( + &self, + at: BlockId, + ) -> impl Future> + Send + where + Self: LoadPendingBlock + SpawnBlocking, + { + async move { + if at.is_pending() { + let PendingBlockEnv { cfg, block_env, origin } = + self.pending_block_env_and_cfg()?; + Ok((cfg, block_env, origin.state_block_id())) + } else { + // Use cached values if there is no pending block + let block_hash = LoadPendingBlock::provider(self) + .block_hash_for_id(at)? + .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + let (cfg, env) = self.cache().get_evm_env(block_hash).await?; + Ok((cfg, env, block_hash.into())) + } + } + } + + /// Returns the revm evm env for the raw block header + /// + /// This is used for tracing raw blocks + fn evm_env_for_raw_block( + &self, + header: &Header, + ) -> impl Future> + Send + where + Self: LoadPendingBlock + SpawnBlocking, + { + async move { + // get the parent config first + let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash.into()).await?; + + let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE; + self.evm_config().fill_block_env(&mut block_env, header, after_merge); + + Ok((cfg, block_env)) + } + } + + /// Returns the number of transactions sent from an address at the given block identifier. 
+ /// + /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// look up the highest transaction in pool and return the next nonce (highest + 1). + fn transaction_count( + &self, + address: Address, + block_id: Option, + ) -> impl Future> + Send + where + Self: SpawnBlocking, + { + self.spawn_blocking_io(move |this| { + if block_id == Some(BlockId::pending()) { + let address_txs = this.pool().get_transactions_by_sender(address); + if let Some(highest_nonce) = + address_txs.iter().map(|item| item.transaction.nonce()).max() + { + let tx_count = highest_nonce + .checked_add(1) + .ok_or(RpcInvalidTransactionError::NonceMaxValue)?; + return Ok(U256::from(tx_count)) + } + } + + let state = this.state_at_block_id_or_latest(block_id)?; + Ok(U256::from(state.account_nonce(address)?.unwrap_or_default())) + }) + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs new file mode 100644 index 000000000000..d48e566ed51d --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -0,0 +1,415 @@ +//! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. + +use futures::Future; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_primitives::B256; +use reth_revm::database::StateProviderDatabase; +use reth_rpc_eth_types::{ + cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, + EthApiError, EthResult, +}; +use reth_rpc_types::{BlockId, TransactionInfo}; +use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector}; +use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; +use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState}; + +use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; + +/// Executes CPU heavy tasks. +pub trait Trace: LoadState { + /// Returns a handle for reading evm config. 
+ /// + /// Data access in default (L1) trait method implementations. + fn evm_config(&self) -> &impl ConfigureEvm; + + /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state + /// changes. + fn inspect( + &self, + db: DB, + env: EnvWithHandlerCfg, + inspector: I, + ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> + where + DB: Database, + ::Error: Into, + I: GetInspector, + { + self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) + } + + /// Same as [`inspect`](Self::inspect) but also returns the database again. + /// + /// Even though [Database] is also implemented on `&mut` + /// this is still useful if there are certain trait bounds on the Inspector's database generic + /// type + fn inspect_and_return_db( + &self, + db: DB, + env: EnvWithHandlerCfg, + inspector: I, + ) -> EthResult<(ResultAndState, EnvWithHandlerCfg, DB)> + where + DB: Database, + ::Error: Into, + I: GetInspector, + { + let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); + let res = evm.transact()?; + let (db, env) = evm.into_db_and_env_with_handler_cfg(); + Ok((res, env, db)) + } + + /// Executes the transaction on top of the given [`BlockId`] with a tracer configured by the + /// config. + /// + /// The callback is then called with the [`TracingInspector`] and the [`ResultAndState`] after + /// the configured [`EnvWithHandlerCfg`] was inspected. 
+ /// + /// Caution: this is blocking + fn trace_at( + &self, + env: EnvWithHandlerCfg, + config: TracingInspectorConfig, + at: BlockId, + f: F, + ) -> EthResult + where + Self: Call, + F: FnOnce(TracingInspector, ResultAndState) -> EthResult, + { + self.with_state_at_block(at, |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut inspector = TracingInspector::new(config); + let (res, _) = self.inspect(&mut db, env, &mut inspector)?; + f(inspector, res) + }) + } + + /// Same as [`trace_at`](Self::trace_at) but also provides the used database to the callback. + /// + /// Executes the transaction on top of the given [`BlockId`] with a tracer configured by the + /// config. + /// + /// The callback is then called with the [`TracingInspector`] and the [`ResultAndState`] after + /// the configured [`EnvWithHandlerCfg`] was inspected. + fn spawn_trace_at_with_state( + &self, + env: EnvWithHandlerCfg, + config: TracingInspectorConfig, + at: BlockId, + f: F, + ) -> impl Future> + Send + where + Self: LoadPendingBlock + Call, + F: FnOnce(TracingInspector, ResultAndState, StateCacheDb<'_>) -> EthResult + + Send + + 'static, + R: Send + 'static, + { + let this = self.clone(); + self.spawn_with_state_at_block(at, move |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut inspector = TracingInspector::new(config); + let (res, _) = this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; + f(inspector, res, db) + }) + } + + /// Retrieves the transaction if it exists and returns its trace. + /// + /// Before the transaction is traced, all previous transaction in the block are applied to the + /// state by executing them first. + /// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed + /// and the database that points to the beginning of the transaction. 
+ /// + /// Note: Implementers should use a threadpool where blocking is allowed, such as + /// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool). + fn spawn_trace_transaction_in_block( + &self, + hash: B256, + config: TracingInspectorConfig, + f: F, + ) -> impl Future>> + Send + where + Self: LoadPendingBlock + LoadTransaction + Call, + F: FnOnce( + TransactionInfo, + TracingInspector, + ResultAndState, + StateCacheDb<'_>, + ) -> EthResult + + Send + + 'static, + R: Send + 'static, + { + self.spawn_trace_transaction_in_block_with_inspector(hash, TracingInspector::new(config), f) + } + + /// Retrieves the transaction if it exists and returns its trace. + /// + /// Before the transaction is traced, all previous transaction in the block are applied to the + /// state by executing them first. + /// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed + /// and the database that points to the beginning of the transaction. + /// + /// Note: Implementers should use a threadpool where blocking is allowed, such as + /// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool). + fn spawn_trace_transaction_in_block_with_inspector( + &self, + hash: B256, + mut inspector: Insp, + f: F, + ) -> impl Future>> + Send + where + Self: LoadPendingBlock + LoadTransaction + Call, + F: FnOnce(TransactionInfo, Insp, ResultAndState, StateCacheDb<'_>) -> EthResult + + Send + + 'static, + Insp: for<'a, 'b> Inspector> + Send + 'static, + R: Send + 'static, + { + async move { + let (transaction, block) = match self.transaction_and_block(hash).await? 
{ + None => return Ok(None), + Some(res) => res, + }; + let (tx, tx_info) = transaction.split(); + + let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?; + + // we need to get the state of the parent block because we're essentially replaying the + // block the transaction is included in + let parent_block = block.parent_hash; + let block_txs = block.into_transactions_ecrecovered(); + + let this = self.clone(); + self.spawn_with_state_at_block(parent_block.into(), move |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // replay all transactions prior to the targeted transaction + this.replay_transactions_until( + &mut db, + cfg.clone(), + block_env.clone(), + block_txs, + tx.hash, + )?; + + let env = EnvWithHandlerCfg::new_with_cfg_env( + cfg, + block_env, + Call::evm_config(&this).tx_env(&tx), + ); + let (res, _) = + this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; + f(tx_info, inspector, res, db) + }) + .await + .map(Some) + } + } + + /// Executes all transactions of a block up to a given index. + /// + /// If a `highest_index` is given, this will only execute the first `highest_index` + /// transactions, in other words, it will stop executing transactions after the + /// `highest_index`th transaction. If `highest_index` is `None`, all transactions + /// are executed. + fn trace_block_until( + &self, + block_id: BlockId, + highest_index: Option, + config: TracingInspectorConfig, + f: F, + ) -> impl Future>>> + Send + where + Self: LoadBlock, + F: Fn( + TransactionInfo, + TracingInspector, + ExecutionResult, + &EvmState, + &StateCacheDb<'_>, + ) -> EthResult + + Send + + 'static, + R: Send + 'static, + { + self.trace_block_until_with_inspector( + block_id, + highest_index, + move || TracingInspector::new(config), + f, + ) + } + + /// Executes all transactions of a block. 
+ /// + /// If a `highest_index` is given, this will only execute the first `highest_index` + /// transactions, in other words, it will stop executing transactions after the + /// `highest_index`th transaction. + /// + /// Note: This expect tx index to be 0-indexed, so the first transaction is at index 0. + /// + /// This accepts a `inspector_setup` closure that returns the inspector to be used for tracing + /// the transactions. + fn trace_block_until_with_inspector( + &self, + block_id: BlockId, + highest_index: Option, + mut inspector_setup: Setup, + f: F, + ) -> impl Future>>> + Send + where + Self: LoadBlock, + F: Fn(TransactionInfo, Insp, ExecutionResult, &EvmState, &StateCacheDb<'_>) -> EthResult + + Send + + 'static, + Setup: FnMut() -> Insp + Send + 'static, + Insp: for<'a, 'b> Inspector> + Send + 'static, + R: Send + 'static, + { + async move { + let ((cfg, block_env, _), block) = + futures::try_join!(self.evm_env_at(block_id), self.block_with_senders(block_id))?; + + let Some(block) = block else { return Ok(None) }; + + if block.body.is_empty() { + // nothing to trace + return Ok(Some(Vec::new())) + } + + // replay all transactions of the block + self.spawn_tracing(move |this| { + // we need to get the state of the parent block because we're replaying this block + // on top of its parent block's state + let state_at = block.parent_hash; + let block_hash = block.hash(); + + let block_number = block_env.number.saturating_to::(); + let base_fee = block_env.basefee.saturating_to::(); + + // prepare transactions, we do everything upfront to reduce time spent with open + // state + let max_transactions = highest_index.map_or(block.body.len(), |highest| { + // we need + 1 because the index is 0-based + highest as usize + 1 + }); + let mut results = Vec::with_capacity(max_transactions); + + let mut transactions = block + .into_transactions_ecrecovered() + .take(max_transactions) + .enumerate() + .map(|(idx, tx)| { + let tx_info = TransactionInfo { + hash: 
Some(tx.hash()), + index: Some(idx as u64), + block_hash: Some(block_hash), + block_number: Some(block_number), + base_fee: Some(base_fee), + }; + let tx_env = Trace::evm_config(&this).tx_env(&tx); + (tx_info, tx_env) + }) + .peekable(); + + // now get the state + let state = this.state_at_block_id(state_at.into())?; + let mut db = + CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); + + while let Some((tx_info, tx)) = transactions.next() { + let env = + EnvWithHandlerCfg::new_with_cfg_env(cfg.clone(), block_env.clone(), tx); + + let mut inspector = inspector_setup(); + let (res, _) = + this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; + let ResultAndState { result, state } = res; + results.push(f(tx_info, inspector, result, &state, &db)?); + + // need to apply the state changes of this transaction before executing the + // next transaction, but only if there's a next transaction + if transactions.peek().is_some() { + // commit the state changes to the DB + db.commit(state) + } + } + + Ok(Some(results)) + }) + .await + } + } + + /// Executes all transactions of a block and returns a list of callback results invoked for each + /// transaction in the block. + /// + /// This + /// 1. fetches all transactions of the block + /// 2. configures the EVM evn + /// 3. loops over all transactions and executes them + /// 4. calls the callback with the transaction info, the execution result, the changed state + /// _after_ the transaction [`StateProviderDatabase`] and the database that points to the + /// state right _before_ the transaction. 
+ fn trace_block_with( + &self, + block_id: BlockId, + config: TracingInspectorConfig, + f: F, + ) -> impl Future>>> + Send + where + Self: LoadBlock, + // This is the callback that's invoked for each transaction with the inspector, the result, + // state and db + F: Fn( + TransactionInfo, + TracingInspector, + ExecutionResult, + &EvmState, + &StateCacheDb<'_>, + ) -> EthResult + + Send + + 'static, + R: Send + 'static, + { + self.trace_block_until(block_id, None, config, f) + } + + /// Executes all transactions of a block and returns a list of callback results invoked for each + /// transaction in the block. + /// + /// This + /// 1. fetches all transactions of the block + /// 2. configures the EVM evn + /// 3. loops over all transactions and executes them + /// 4. calls the callback with the transaction info, the execution result, the changed state + /// _after_ the transaction [`EvmState`] and the database that points to the state right + /// _before_ the transaction, in other words the state the transaction was executed on: + /// `changed_state = tx(cached_state)` + /// + /// This accepts a `inspector_setup` closure that returns the inspector to be used for tracing + /// a transaction. This is invoked for each transaction. 
+ fn trace_block_inspector( + &self, + block_id: BlockId, + insp_setup: Setup, + f: F, + ) -> impl Future>>> + Send + where + Self: LoadBlock, + // This is the callback that's invoked for each transaction with the inspector, the result, + // state and db + F: Fn(TransactionInfo, Insp, ExecutionResult, &EvmState, &StateCacheDb<'_>) -> EthResult + + Send + + 'static, + Setup: FnMut() -> Insp + Send + 'static, + Insp: for<'a, 'b> Inspector> + Send + 'static, + R: Send + 'static, + { + self.trace_block_until_with_inspector(block_id, None, insp_setup, f) + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs new file mode 100644 index 000000000000..73355b47a781 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -0,0 +1,668 @@ +//! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t. +//! network. + +use std::{fmt, ops::Deref, sync::Arc}; + +use alloy_dyn_abi::TypedData; +use futures::Future; +use reth_primitives::{ + Address, BlockId, Bytes, FromRecoveredPooledTransaction, IntoRecoveredTransaction, Receipt, + SealedBlockWithSenders, TransactionMeta, TransactionSigned, TxHash, TxKind, B256, U256, +}; +use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; +use reth_rpc_eth_types::{ + utils::recover_raw_transaction, EthApiError, EthResult, EthStateCache, SignError, + TransactionSource, +}; +use reth_rpc_types::{ + transaction::{ + EIP1559TransactionRequest, EIP2930TransactionRequest, EIP4844TransactionRequest, + LegacyTransactionRequest, + }, + AnyTransactionReceipt, Transaction, TransactionRequest, TypedTransactionRequest, +}; +use reth_rpc_types_compat::transaction::from_recovered_with_block_context; +use reth_transaction_pool::{TransactionOrigin, TransactionPool}; + +use super::EthSigner; + +use super::{Call, EthApiSpec, LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, SpawnBlocking}; + +/// Transaction related 
functions for the [`EthApiServer`](crate::EthApiServer) trait in +/// the `eth_` namespace. +/// +/// This includes utilities for transaction tracing, transacting and inspection. +/// +/// Async functions that are spawned onto the +/// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool) begin with `spawn_` +/// +/// ## Calls +/// +/// There are subtle differences between when transacting [`TransactionRequest`]: +/// +/// The endpoints `eth_call` and `eth_estimateGas` and `eth_createAccessList` should always +/// __disable__ the base fee check in the +/// [`EnvWithHandlerCfg`](revm_primitives::CfgEnvWithHandlerCfg). +/// +/// The behaviour for tracing endpoints is not consistent across clients. +/// Geth also disables the basefee check for tracing: +/// Erigon does not: +/// +/// See also +/// +/// This implementation follows the behaviour of Geth and disables the basefee check for tracing. +pub trait EthTransactions: LoadTransaction { + /// Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider(&self) -> impl BlockReaderIdExt; + + /// Returns a handle for forwarding received raw transactions. + /// + /// Access to transaction forwarder in default (L1) trait method implementations. + fn raw_tx_forwarder(&self) -> Option>; + + /// Returns a handle for signing data. + /// + /// Singer access in default (L1) trait method implementations. + fn signers(&self) -> &parking_lot::RwLock>>; + + /// Returns the transaction by hash. + /// + /// Checks the pool and state. + /// + /// Returns `Ok(None)` if no matching transaction was found. + fn transaction_by_hash( + &self, + hash: B256, + ) -> impl Future>> + Send { + LoadTransaction::transaction_by_hash(self, hash) + } + + /// Get all transactions in the block with the given hash. + /// + /// Returns `None` if block does not exist. 
+ fn transactions_by_block( + &self, + block: B256, + ) -> impl Future>>> + Send { + async move { Ok(self.cache().get_block_transactions(block).await?) } + } + + /// Returns the EIP-2718 encoded transaction by hash. + /// + /// If this is a pooled EIP-4844 transaction, the blob sidecar is included. + /// + /// Checks the pool and state. + /// + /// Returns `Ok(None)` if no matching transaction was found. + fn raw_transaction_by_hash( + &self, + hash: B256, + ) -> impl Future>> + Send { + async move { + // Note: this is mostly used to fetch pooled transactions so we check the pool first + if let Some(tx) = + self.pool().get_pooled_transaction_element(hash).map(|tx| tx.envelope_encoded()) + { + return Ok(Some(tx)) + } + + self.spawn_blocking_io(move |ref this| { + Ok(LoadTransaction::provider(this) + .transaction_by_hash(hash)? + .map(|tx| tx.envelope_encoded())) + }) + .await + } + } + + /// Returns the _historical_ transaction and the block it was mined in + fn historical_transaction_by_hash_at( + &self, + hash: B256, + ) -> impl Future>> + Send { + async move { + match self.transaction_by_hash_at(hash).await? { + None => Ok(None), + Some((tx, at)) => Ok(at.as_block_hash().map(|hash| (tx, hash))), + } + } + } + + /// Returns the transaction receipt for the given hash. + /// + /// Returns None if the transaction does not exist or is pending + /// Note: The tx receipt is not available for pending transactions. + fn transaction_receipt( + &self, + hash: B256, + ) -> impl Future>> + Send + where + Self: LoadReceipt + 'static, + { + async move { + let result = self.load_transaction_and_receipt(hash).await?; + + let (tx, meta, receipt) = match result { + Some((tx, meta, receipt)) => (tx, meta, receipt), + None => return Ok(None), + }; + + self.build_transaction_receipt(tx, meta, receipt).await.map(Some) + } + } + + /// Helper method that loads a transaction and its receipt. 
+ fn load_transaction_and_receipt( + &self, + hash: TxHash, + ) -> impl Future>> + Send + where + Self: 'static, + { + let this = self.clone(); + self.spawn_blocking_io(move |_| { + let (tx, meta) = + match LoadTransaction::provider(&this).transaction_by_hash_with_meta(hash)? { + Some((tx, meta)) => (tx, meta), + None => return Ok(None), + }; + + let receipt = match EthTransactions::provider(&this).receipt_by_hash(hash)? { + Some(recpt) => recpt, + None => return Ok(None), + }; + + Ok(Some((tx, meta, receipt))) + }) + } + + /// Get [`Transaction`] by [`BlockId`] and index of transaction within that block. + /// + /// Returns `Ok(None)` if the block does not exist, or index is out of range. + fn transaction_by_block_and_tx_index( + &self, + block_id: BlockId, + index: usize, + ) -> impl Future>> + Send + where + Self: LoadBlock, + { + async move { + if let Some(block) = self.block_with_senders(block_id).await? { + let block_hash = block.hash(); + let block_number = block.number; + let base_fee_per_gas = block.base_fee_per_gas; + if let Some(tx) = block.into_transactions_ecrecovered().nth(index) { + return Ok(Some(from_recovered_with_block_context( + tx, + block_hash, + block_number, + base_fee_per_gas, + index, + ))) + } + } + + Ok(None) + } + } + + /// Get transaction, as raw bytes, by [`BlockId`] and index of transaction within that block. + /// + /// Returns `Ok(None)` if the block does not exist, or index is out of range. + fn raw_transaction_by_block_and_tx_index( + &self, + block_id: BlockId, + index: usize, + ) -> impl Future>> + Send + where + Self: LoadBlock, + { + async move { + if let Some(block) = self.block_with_senders(block_id).await? { + if let Some(tx) = block.transactions().nth(index) { + return Ok(Some(tx.envelope_encoded())) + } + } + + Ok(None) + } + } + + /// Decodes and recovers the transaction and submits it to the pool. + /// + /// Returns the hash of the transaction. 
+ fn send_raw_transaction(&self, tx: Bytes) -> impl Future> + Send { + async move { + // On optimism, transactions are forwarded directly to the sequencer to be included in + // blocks that it builds. + if let Some(client) = self.raw_tx_forwarder().as_ref() { + tracing::debug!( target: "rpc::eth", "forwarding raw transaction to"); + client.forward_raw_transaction(&tx).await?; + } + + let recovered = recover_raw_transaction(tx)?; + let pool_transaction = + ::Transaction::from_recovered_pooled_transaction( + recovered, + ); + + // submit the transaction to the pool with a `Local` origin + let hash = + self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; + + Ok(hash) + } + } + + /// Signs transaction with a matching signer, if any and submits the transaction to the pool. + /// Returns the hash of the signed transaction. + fn send_transaction( + &self, + mut request: TransactionRequest, + ) -> impl Future> + Send + where + Self: EthApiSpec + LoadBlock + LoadPendingBlock + LoadFee + Call, + { + async move { + let from = match request.from { + Some(from) => from, + None => return Err(SignError::NoAccount.into()), + }; + + // set nonce if not already set before + if request.nonce.is_none() { + let nonce = self.transaction_count(from, Some(BlockId::pending())).await?; + // note: `.to()` can't panic because the nonce is constructed from a `u64` + request.nonce = Some(nonce.to::()); + } + + let chain_id = self.chain_id(); + + let estimated_gas = + self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?; + let gas_limit = estimated_gas; + + let TransactionRequest { + to, + gas_price, + max_fee_per_gas, + max_priority_fee_per_gas, + gas, + value, + input: data, + nonce, + mut access_list, + max_fee_per_blob_gas, + blob_versioned_hashes, + sidecar, + .. 
+ } = request; + + // todo: remove this inlining after https://github.com/alloy-rs/alloy/pull/183#issuecomment-1928161285 + let transaction = match ( + gas_price, + max_fee_per_gas, + access_list.take(), + max_fee_per_blob_gas, + blob_versioned_hashes, + sidecar, + ) { + // legacy transaction + // gas price required + (Some(_), None, None, None, None, None) => { + Some(TypedTransactionRequest::Legacy(LegacyTransactionRequest { + nonce: nonce.unwrap_or_default(), + gas_price: U256::from(gas_price.unwrap_or_default()), + gas_limit: U256::from(gas.unwrap_or_default()), + value: value.unwrap_or_default(), + input: data.into_input().unwrap_or_default(), + kind: to.unwrap_or(TxKind::Create), + chain_id: None, + })) + } + // EIP2930 + // if only accesslist is set, and no eip1599 fees + (_, None, Some(access_list), None, None, None) => { + Some(TypedTransactionRequest::EIP2930(EIP2930TransactionRequest { + nonce: nonce.unwrap_or_default(), + gas_price: U256::from(gas_price.unwrap_or_default()), + gas_limit: U256::from(gas.unwrap_or_default()), + value: value.unwrap_or_default(), + input: data.into_input().unwrap_or_default(), + kind: to.unwrap_or(TxKind::Create), + chain_id: 0, + access_list, + })) + } + // EIP1559 + // if 4844 fields missing + // gas_price, max_fee_per_gas, access_list, max_fee_per_blob_gas, + // blob_versioned_hashes, sidecar, + (None, _, _, None, None, None) => { + // Empty fields fall back to the canonical transaction schema. 
+ Some(TypedTransactionRequest::EIP1559(EIP1559TransactionRequest { + nonce: nonce.unwrap_or_default(), + max_fee_per_gas: U256::from(max_fee_per_gas.unwrap_or_default()), + max_priority_fee_per_gas: U256::from( + max_priority_fee_per_gas.unwrap_or_default(), + ), + gas_limit: U256::from(gas.unwrap_or_default()), + value: value.unwrap_or_default(), + input: data.into_input().unwrap_or_default(), + kind: to.unwrap_or(TxKind::Create), + chain_id: 0, + access_list: access_list.unwrap_or_default(), + })) + } + // EIP4884 + // all blob fields required + ( + None, + _, + _, + Some(max_fee_per_blob_gas), + Some(blob_versioned_hashes), + Some(sidecar), + ) => { + // As per the EIP, we follow the same semantics as EIP-1559. + Some(TypedTransactionRequest::EIP4844(EIP4844TransactionRequest { + chain_id: 0, + nonce: nonce.unwrap_or_default(), + max_priority_fee_per_gas: U256::from( + max_priority_fee_per_gas.unwrap_or_default(), + ), + max_fee_per_gas: U256::from(max_fee_per_gas.unwrap_or_default()), + gas_limit: U256::from(gas.unwrap_or_default()), + value: value.unwrap_or_default(), + input: data.into_input().unwrap_or_default(), + #[allow(clippy::manual_unwrap_or_default)] // clippy is suggesting here unwrap_or_default + to: match to { + Some(TxKind::Call(to)) => to, + _ => Address::default(), + }, + access_list: access_list.unwrap_or_default(), + + // eip-4844 specific. 
+ max_fee_per_blob_gas: U256::from(max_fee_per_blob_gas), + blob_versioned_hashes, + sidecar, + })) + } + + _ => None, + }; + + let transaction = match transaction { + Some(TypedTransactionRequest::Legacy(mut req)) => { + req.chain_id = Some(chain_id.to()); + req.gas_limit = gas_limit.saturating_to(); + req.gas_price = self.legacy_gas_price(gas_price.map(U256::from)).await?; + + TypedTransactionRequest::Legacy(req) + } + Some(TypedTransactionRequest::EIP2930(mut req)) => { + req.chain_id = chain_id.to(); + req.gas_limit = gas_limit.saturating_to(); + req.gas_price = self.legacy_gas_price(gas_price.map(U256::from)).await?; + + TypedTransactionRequest::EIP2930(req) + } + Some(TypedTransactionRequest::EIP1559(mut req)) => { + let (max_fee_per_gas, max_priority_fee_per_gas) = self + .eip1559_fees( + max_fee_per_gas.map(U256::from), + max_priority_fee_per_gas.map(U256::from), + ) + .await?; + + req.chain_id = chain_id.to(); + req.gas_limit = gas_limit.saturating_to(); + req.max_fee_per_gas = max_fee_per_gas.saturating_to(); + req.max_priority_fee_per_gas = max_priority_fee_per_gas.saturating_to(); + + TypedTransactionRequest::EIP1559(req) + } + Some(TypedTransactionRequest::EIP4844(mut req)) => { + let (max_fee_per_gas, max_priority_fee_per_gas) = self + .eip1559_fees( + max_fee_per_gas.map(U256::from), + max_priority_fee_per_gas.map(U256::from), + ) + .await?; + + req.max_fee_per_gas = max_fee_per_gas; + req.max_priority_fee_per_gas = max_priority_fee_per_gas; + req.max_fee_per_blob_gas = + self.eip4844_blob_fee(max_fee_per_blob_gas.map(U256::from)).await?; + + req.chain_id = chain_id.to(); + req.gas_limit = gas_limit; + + TypedTransactionRequest::EIP4844(req) + } + None => return Err(EthApiError::ConflictingFeeFieldsInRequest), + }; + + let signed_tx = self.sign_request(&from, transaction)?; + + let recovered = + signed_tx.into_ecrecovered().ok_or(EthApiError::InvalidTransactionSignature)?; + + let pool_transaction = match recovered.try_into() { + Ok(converted) => 
<::Pool as TransactionPool>::Transaction::from_recovered_pooled_transaction(converted), + Err(_) => return Err(EthApiError::TransactionConversionError), + }; + + // submit the transaction to the pool with a `Local` origin + let hash = LoadTransaction::pool(self) + .add_transaction(TransactionOrigin::Local, pool_transaction) + .await?; + + Ok(hash) + } + } + + /// Signs a transaction, with configured signers. + fn sign_request( + &self, + from: &Address, + request: TypedTransactionRequest, + ) -> EthResult { + for signer in self.signers().read().iter() { + if signer.is_signer_for(from) { + return match signer.sign_transaction(request, from) { + Ok(tx) => Ok(tx), + Err(e) => Err(e.into()), + } + } + } + Err(EthApiError::InvalidTransactionSignature) + } + + /// Signs given message. Returns the signature. + fn sign( + &self, + account: Address, + message: Bytes, + ) -> impl Future> + Send { + async move { Ok(self.find_signer(&account)?.sign(account, &message).await?.to_hex_bytes()) } + } + + /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. + fn sign_typed_data(&self, data: &TypedData, account: Address) -> EthResult { + Ok(self.find_signer(&account)?.sign_typed_data(account, data)?.to_hex_bytes()) + } + + /// Returns the signer for the given account, if found in configured signers. + fn find_signer(&self, account: &Address) -> Result, SignError> { + self.signers() + .read() + .iter() + .find(|signer| signer.is_signer_for(account)) + .map(|signer| dyn_clone::clone_box(&**signer)) + .ok_or(SignError::NoAccount) + } +} + +/// Loads a transaction from database. +/// +/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC +/// methods. +pub trait LoadTransaction: SpawnBlocking { + /// Transaction pool with pending transactions. [`TransactionPool::Transaction`] is the + /// supported transaction type. + type Pool: TransactionPool; + + /// Returns a handle for reading data from disk. 
+ /// + /// Data access in default (L1) trait method implementations. + fn provider(&self) -> impl TransactionsProvider; + + /// Returns a handle for reading data from memory. + /// + /// Data access in default (L1) trait method implementations. + fn cache(&self) -> &EthStateCache; + + /// Returns a handle for reading data from pool. + /// + /// Data access in default (L1) trait method implementations. + fn pool(&self) -> &Self::Pool; + + /// Returns the transaction by hash. + /// + /// Checks the pool and state. + /// + /// Returns `Ok(None)` if no matching transaction was found. + fn transaction_by_hash( + &self, + hash: B256, + ) -> impl Future>> + Send { + async move { + // Try to find the transaction on disk + let mut resp = self + .spawn_blocking_io(move |this| { + match this.provider().transaction_by_hash_with_meta(hash)? { + None => Ok(None), + Some((tx, meta)) => { + // Note: we assume this transaction is valid, because it's mined (or + // part of pending block) and already. We don't need to + // check for pre EIP-2 because this transaction could be pre-EIP-2. + let transaction = tx + .into_ecrecovered_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature)?; + + let tx = TransactionSource::Block { + transaction, + index: meta.index, + block_hash: meta.block_hash, + block_number: meta.block_number, + base_fee: meta.base_fee, + }; + Ok(Some(tx)) + } + } + }) + .await?; + + if resp.is_none() { + // tx not found on disk, check pool + if let Some(tx) = + self.pool().get(&hash).map(|tx| tx.transaction.to_recovered_transaction()) + { + resp = Some(TransactionSource::Pool(tx)); + } + } + + Ok(resp) + } + } + + /// Returns the transaction by including its corresponding [`BlockId`]. + /// + /// Note: this supports pending transactions + fn transaction_by_hash_at( + &self, + transaction_hash: B256, + ) -> impl Future>> + Send { + async move { + match self.transaction_by_hash(transaction_hash).await? 
{ + None => Ok(None), + Some(tx) => { + let res = match tx { + tx @ TransactionSource::Pool(_) => (tx, BlockId::pending()), + TransactionSource::Block { + transaction, + index, + block_hash, + block_number, + base_fee, + } => { + let at = BlockId::Hash(block_hash.into()); + let tx = TransactionSource::Block { + transaction, + index, + block_hash, + block_number, + base_fee, + }; + (tx, at) + } + }; + Ok(Some(res)) + } + } + } + } + + /// Fetches the transaction and the transaction's block + fn transaction_and_block( + &self, + hash: B256, + ) -> impl Future>> + Send + { + async move { + let (transaction, at) = match self.transaction_by_hash_at(hash).await? { + None => return Ok(None), + Some(res) => res, + }; + + // Note: this is always either hash or pending + let block_hash = match at { + BlockId::Hash(hash) => hash.block_hash, + _ => return Ok(None), + }; + let block = self.cache().get_block_with_senders(block_hash).await?; + Ok(block.map(|block| (transaction, block.seal(block_hash)))) + } + } +} + +/// A trait that allows for forwarding raw transactions. +/// +/// For example to a sequencer. +#[async_trait::async_trait] +pub trait RawTransactionForwarder: fmt::Debug + Send + Sync + 'static { + /// Forwards raw transaction bytes for `eth_sendRawTransaction` + async fn forward_raw_transaction(&self, raw: &[u8]) -> EthResult<()>; +} + +/// Configure server's forwarder for `eth_sendRawTransaction`, at runtime. +pub trait UpdateRawTxForwarder { + /// Sets a forwarder for `eth_sendRawTransaction` + /// + /// Note: this might be removed in the future in favor of a more generic approach. 
+ fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc); +} + +impl UpdateRawTxForwarder for T +where + T: Deref>, + K: UpdateRawTxForwarder, +{ + fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc) { + self.deref().deref().set_eth_raw_transaction_forwarder(forwarder); + } +} diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs new file mode 100644 index 000000000000..1aed94d5cc6e --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -0,0 +1,33 @@ +//! Reth RPC `eth_` API implementation +//! +//! ## Feature Flags +//! +//! - `client`: Enables JSON-RPC client support. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod bundle; +pub mod core; +pub mod filter; +pub mod helpers; +pub mod pubsub; + +pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; +pub use core::{EthApiServer, FullEthApiServer}; +pub use filter::EthFilterApiServer; +pub use pubsub::EthPubSubApiServer; + +pub use helpers::transaction::RawTransactionForwarder; + +#[cfg(feature = "client")] +pub use bundle::{EthBundleApiClient, EthCallBundleApiClient}; +#[cfg(feature = "client")] +pub use core::EthApiClient; +#[cfg(feature = "client")] +pub use filter::EthFilterApiClient; diff --git a/crates/rpc/rpc-api/src/eth_pubsub.rs b/crates/rpc/rpc-eth-api/src/pubsub.rs similarity index 92% rename from crates/rpc/rpc-api/src/eth_pubsub.rs rename to crates/rpc/rpc-eth-api/src/pubsub.rs index eaa1ef2d817e..8de125152823 100644 --- a/crates/rpc/rpc-api/src/eth_pubsub.rs +++ b/crates/rpc/rpc-eth-api/src/pubsub.rs @@ -1,3 +1,5 @@ +//! `eth_` RPC API for pubsub subscription. 
+ use jsonrpsee::proc_macros::rpc; use reth_rpc_types::pubsub::{Params, SubscriptionKind}; diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml new file mode 100644 index 000000000000..b1c307191025 --- /dev/null +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -0,0 +1,68 @@ +[package] +name = "reth-rpc-eth-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Types supporting implementation of 'eth' namespace RPC server API" + +[lints] +workspace = true + +[dependencies] +reth-chainspec.workspace = true +reth-errors.workspace = true +reth-evm.workspace = true +reth-execution-types.workspace = true +reth-metrics.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-rpc-server-types.workspace = true +reth-rpc-types.workspace = true +reth-rpc-types-compat.workspace = true +reth-tasks.workspace = true +reth-transaction-pool.workspace = true +reth-trie.workspace = true + +# ethereum +alloy-sol-types.workspace = true +revm.workspace = true +revm-inspectors = { workspace = true, features = ["js-tracer"] } +revm-primitives = { workspace = true, features = ["dev"] } + +# rpc +jsonrpsee-core.workspace = true +jsonrpsee-types.workspace = true + +# async +futures.workspace = true +tokio.workspace = true +tokio-stream.workspace = true + +# metrics +metrics.workspace = true + +# misc +serde = { workspace = true, features = ["derive"] } +thiserror.workspace = true +derive_more.workspace = true +schnellru.workspace = true +rand.workspace = true +tracing.workspace = true + +[dev-dependencies] +serde_json.workspace = true + +[features] +optimism = [ + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-revm/optimism", + "reth-chainspec/optimism", + "reth-execution-types/optimism", + "reth-revm/optimism", + "revm/optimism" +] \ No newline at end of 
file diff --git a/crates/rpc/rpc/src/eth/cache/config.rs b/crates/rpc/rpc-eth-types/src/cache/config.rs similarity index 71% rename from crates/rpc/rpc/src/eth/cache/config.rs rename to crates/rpc/rpc-eth-types/src/cache/config.rs index 5dc989e8e637..64999bd6bf3e 100644 --- a/crates/rpc/rpc/src/eth/cache/config.rs +++ b/crates/rpc/rpc-eth-types/src/cache/config.rs @@ -1,8 +1,14 @@ -use reth_rpc_server_types::constants::cache::*; +//! Configuration for RPC cache. + use serde::{Deserialize, Serialize}; -/// Settings for the [`EthStateCache`](crate::eth::cache::EthStateCache). -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +use reth_rpc_server_types::constants::cache::{ + DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN, + DEFAULT_RECEIPT_CACHE_MAX_LEN, +}; + +/// Settings for the [`EthStateCache`](super::EthStateCache). +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EthStateCacheConfig { /// Max number of blocks in cache. diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs new file mode 100644 index 000000000000..0370f5e600da --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -0,0 +1,172 @@ +//! Helper types to workaround 'higher-ranked lifetime error' +//! in default implementation of +//! `reth_rpc_eth_api::helpers::Call`. 
+ +use reth_primitives::{B256, U256}; +use reth_provider::StateProvider; +use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; +use revm::Database; + +/// Helper alias type for the state's [`CacheDB`] +pub type StateCacheDb<'a> = CacheDB>>; + +/// Hack to get around 'higher-ranked lifetime error', see +/// +#[allow(missing_debug_implementations)] +pub struct StateProviderTraitObjWrapper<'a>(pub &'a dyn StateProvider); + +impl<'a> reth_provider::StateRootProvider for StateProviderTraitObjWrapper<'a> { + fn state_root( + &self, + bundle_state: &revm::db::BundleState, + ) -> reth_errors::ProviderResult { + self.0.state_root(bundle_state) + } + + fn state_root_with_updates( + &self, + bundle_state: &revm::db::BundleState, + ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> { + self.0.state_root_with_updates(bundle_state) + } +} + +impl<'a> reth_provider::StateProofProvider for StateProviderTraitObjWrapper<'a> { + fn proof( + &self, + state: &revm::db::BundleState, + address: revm_primitives::Address, + slots: &[B256], + ) -> reth_errors::ProviderResult { + self.0.proof(state, address, slots) + } +} + +impl<'a> reth_provider::AccountReader for StateProviderTraitObjWrapper<'a> { + fn basic_account( + &self, + address: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.basic_account(address) + } +} + +impl<'a> reth_provider::BlockHashReader for StateProviderTraitObjWrapper<'a> { + fn block_hash( + &self, + block_number: reth_primitives::BlockNumber, + ) -> reth_errors::ProviderResult> { + self.0.block_hash(block_number) + } + + fn canonical_hashes_range( + &self, + start: reth_primitives::BlockNumber, + end: reth_primitives::BlockNumber, + ) -> reth_errors::ProviderResult> { + self.0.canonical_hashes_range(start, end) + } + + fn convert_block_hash( + &self, + hash_or_number: reth_rpc_types::BlockHashOrNumber, + ) -> reth_errors::ProviderResult> { + self.0.convert_block_hash(hash_or_number) + } +} + 
+impl<'a> StateProvider for StateProviderTraitObjWrapper<'a> { + fn account_balance( + &self, + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_balance(addr) + } + + fn account_code( + &self, + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_code(addr) + } + + fn account_nonce( + &self, + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_nonce(addr) + } + + fn bytecode_by_hash( + &self, + code_hash: B256, + ) -> reth_errors::ProviderResult> { + self.0.bytecode_by_hash(code_hash) + } + + fn storage( + &self, + account: revm_primitives::Address, + storage_key: reth_primitives::StorageKey, + ) -> reth_errors::ProviderResult> { + self.0.storage(account, storage_key) + } +} + +/// Hack to get around 'higher-ranked lifetime error', see +/// +#[allow(missing_debug_implementations)] +pub struct StateCacheDbRefMutWrapper<'a, 'b>(pub &'b mut StateCacheDb<'a>); + +impl<'a, 'b> Database for StateCacheDbRefMutWrapper<'a, 'b> { + type Error = as Database>::Error; + fn basic( + &mut self, + address: revm_primitives::Address, + ) -> Result, Self::Error> { + self.0.basic(address) + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + self.0.code_by_hash(code_hash) + } + + fn storage( + &mut self, + address: revm_primitives::Address, + index: U256, + ) -> Result { + self.0.storage(address, index) + } + + fn block_hash(&mut self, number: u64) -> Result { + self.0.block_hash(number) + } +} + +impl<'a, 'b> DatabaseRef for StateCacheDbRefMutWrapper<'a, 'b> { + type Error = as Database>::Error; + + fn basic_ref( + &self, + address: revm_primitives::Address, + ) -> Result, Self::Error> { + self.0.basic_ref(address) + } + + fn code_by_hash_ref(&self, code_hash: B256) -> Result { + self.0.code_by_hash_ref(code_hash) + } + + fn storage_ref( + &self, + address: revm_primitives::Address, + index: U256, + ) -> Result { + self.0.storage_ref(address, index) + } + + fn 
block_hash_ref(&self, number: u64) -> Result { + self.0.block_hash_ref(number) + } +} diff --git a/crates/rpc/rpc/src/eth/cache/metrics.rs b/crates/rpc/rpc-eth-types/src/cache/metrics.rs similarity index 93% rename from crates/rpc/rpc/src/eth/cache/metrics.rs rename to crates/rpc/rpc-eth-types/src/cache/metrics.rs index c9b18a299da3..d87a35e03170 100644 --- a/crates/rpc/rpc/src/eth/cache/metrics.rs +++ b/crates/rpc/rpc-eth-types/src/cache/metrics.rs @@ -1,3 +1,5 @@ +//! Tracks state of RPC cache. + use metrics::Counter; use reth_metrics::{metrics::Gauge, Metrics}; diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs similarity index 98% rename from crates/rpc/rpc/src/eth/cache/mod.rs rename to crates/rpc/rpc-eth-types/src/cache/mod.rs index 70c67222e081..762322235cae 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -3,13 +3,13 @@ use futures::{future::Either, Stream, StreamExt}; use reth_errors::{ProviderError, ProviderResult}; use reth_evm::ConfigureEvm; +use reth_execution_types::Chain; use reth_primitives::{ Block, BlockHashOrNumber, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, TransactionSigned, TransactionSignedEcRecovered, B256, }; use reth_provider::{ - BlockReader, CanonStateNotification, Chain, EvmEnvProvider, StateProviderFactory, - TransactionVariant, + BlockReader, CanonStateNotification, EvmEnvProvider, StateProviderFactory, TransactionVariant, }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; @@ -26,13 +26,12 @@ use tokio::sync::{ }; use tokio_stream::wrappers::UnboundedReceiverStream; -mod config; -pub use config::*; +use super::{EthStateCacheConfig, MultiConsumerLruCache}; -mod metrics; - -mod multi_consumer; -pub use multi_consumer::MultiConsumerLruCache; +pub mod config; +pub mod db; +pub mod metrics; +pub mod multi_consumer; /// The type that can send the response 
to a requested [Block] type BlockTransactionsResponseSender = @@ -107,7 +106,7 @@ impl EthStateCache { ) -> Self where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, - EvmConfig: ConfigureEvm + 'static, + EvmConfig: ConfigureEvm, { Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config) } @@ -125,7 +124,7 @@ impl EthStateCache { where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + EvmConfig: ConfigureEvm, { let EthStateCacheConfig { max_blocks, max_receipts, max_envs, max_concurrent_db_requests } = config; @@ -277,7 +276,7 @@ impl EthStateCache { /// handles messages and does LRU lookups and never blocking IO. /// /// Caution: The channel for the data is _unbounded_ it is assumed that this is mainly used by the -/// [`EthApi`](crate::EthApi) which is typically invoked by the RPC server, which already uses +/// `reth_rpc::EthApi` which is typically invoked by the RPC server, which already uses /// permits to limit concurrent requests. #[must_use = "Type does nothing unless spawned"] pub(crate) struct EthStateCacheService< @@ -316,7 +315,7 @@ impl EthStateCacheService>) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { @@ -403,7 +402,7 @@ impl Future for EthStateCacheService where K: Hash + Eq, L: Limiter, { - /// The LRU cache for the + /// The LRU cache. cache: LruMap, - /// All queued consumers + /// All queued consumers. queued: HashMap>, /// Cache metrics metrics: CacheMetrics, diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc-eth-types/src/error.rs similarity index 93% rename from crates/rpc/rpc/src/eth/error.rs rename to crates/rpc/rpc-eth-types/src/error.rs index 4a9b6d043989..95d989a19a81 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -1,10 +1,13 @@ //! 
Implementation specific Errors for the `eth_` namespace. -use crate::result::{internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code}; +use std::time::Duration; + use alloy_sol_types::decode_revert_reason; -use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; use reth_errors::RethError; use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes}; +use reth_rpc_server_types::result::{ + internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, +}; use reth_rpc_types::{ error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, }; @@ -14,7 +17,6 @@ use reth_transaction_pool::error::{ }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, OutOfGasError}; use revm_inspectors::tracing::{js::JsInspectorError, MuxError}; -use std::time::Duration; /// Result alias pub type EthResult = Result; @@ -52,6 +54,9 @@ pub enum EthApiError { /// When an invalid block range is provided #[error("invalid block range")] InvalidBlockRange, + /// Thrown when the target block for proof computation exceeds the maximum configured window. + #[error("distance to target block exceeds maximum proof window")] + ExceedsMaxProofWindow, /// An internal error where prevrandao is not set in the evm's environment #[error("prevrandao not in the EVM's environment after merge")] PrevrandaoNotSet, @@ -113,6 +118,9 @@ pub enum EthApiError { /// Evm generic purpose error. 
#[error("Revm error: {0}")] EvmCustom(String), + /// Evm precompile error + #[error("Revm precompile error: {0}")] + EvmPrecompile(String), /// Error encountered when converting a transaction type #[error("Transaction conversion error")] TransactionConversionError, @@ -131,13 +139,14 @@ impl EthApiError { } } -impl From for ErrorObject<'static> { +impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(error: EthApiError) -> Self { match error { EthApiError::FailedToDecodeSignedTransaction | EthApiError::InvalidTransactionSignature | EthApiError::EmptyRawTransactionData | EthApiError::InvalidBlockRange | + EthApiError::ExceedsMaxProofWindow | EthApiError::ConflictingFeeFieldsInRequest | EthApiError::Signing(_) | EthApiError::BothStateAndStateDiffInOverride(_) | @@ -151,6 +160,7 @@ impl From for ErrorObject<'static> { EthApiError::Internal(_) | EthApiError::TransactionNotFound | EthApiError::EvmCustom(_) | + EthApiError::EvmPrecompile(_) | EthApiError::InvalidRewardPercentiles => internal_rpc_err(error.to_string()), EthApiError::UnknownBlockNumber | EthApiError::UnknownBlockOrTxIndex => { rpc_error_with_code(EthRpcErrorCode::ResourceNotFound.code(), error.to_string()) @@ -161,9 +171,10 @@ impl From for ErrorObject<'static> { EthApiError::Unsupported(msg) => internal_rpc_err(msg), EthApiError::InternalJsTracerError(msg) => internal_rpc_err(msg), EthApiError::InvalidParams(msg) => invalid_params_rpc_err(msg), - err @ EthApiError::ExecutionTimedOut(_) => { - rpc_error_with_code(CALL_EXECUTION_FAILED_CODE, err.to_string()) - } + err @ EthApiError::ExecutionTimedOut(_) => rpc_error_with_code( + jsonrpsee_types::error::CALL_EXECUTION_FAILED_CODE, + err.to_string(), + ), err @ EthApiError::InternalBlockingTaskError | err @ EthApiError::InternalEthError => { internal_rpc_err(err.to_string()) } @@ -221,6 +232,7 @@ where EVMError::Header(InvalidHeader::ExcessBlobGasNotSet) => Self::ExcessBlobGasNotSet, EVMError::Database(err) => err.into(), EVMError::Custom(err) 
=> Self::EvmCustom(err), + EVMError::Precompile(err) => Self::EvmPrecompile(err), } } } @@ -348,6 +360,15 @@ pub enum RpcInvalidTransactionError { /// Blob transaction is a create transaction #[error("blob transaction is a create transaction")] BlobTransactionIsCreate, + /// EOF crate should have `to` address + #[error("EOF crate should have `to` address")] + EofCrateShouldHaveToAddress, + /// EIP-7702 is not enabled. + #[error("EIP-7702 authorization list not supported")] + AuthorizationListNotSupported, + /// EIP-7702 transaction has invalid fields set. + #[error("EIP-7702 authorization list has invalid fields")] + AuthorizationListInvalidFields, /// Optimism related error #[error(transparent)] #[cfg(feature = "optimism")] @@ -381,7 +402,7 @@ impl RpcInvalidTransactionError { /// Converts the halt error /// /// Takes the configured gas limit of the transaction which is attached to the error - pub(crate) const fn halt(reason: HaltReason, gas_limit: u64) -> Self { + pub const fn halt(reason: HaltReason, gas_limit: u64) -> Self { match reason { HaltReason::OutOfGas(err) => Self::out_of_gas(err, gas_limit), HaltReason::NonceOverflow => Self::NonceMaxValue, @@ -390,7 +411,7 @@ impl RpcInvalidTransactionError { } /// Converts the out of gas error - pub(crate) const fn out_of_gas(reason: OutOfGasError, gas_limit: u64) -> Self { + pub const fn out_of_gas(reason: OutOfGasError, gas_limit: u64) -> Self { match reason { OutOfGasError::Basic => Self::BasicOutOfGas(gas_limit), OutOfGasError::Memory | OutOfGasError::MemoryLimit => Self::MemoryOutOfGas(gas_limit), @@ -400,7 +421,7 @@ impl RpcInvalidTransactionError { } } -impl From for ErrorObject<'static> { +impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(err: RpcInvalidTransactionError) -> Self { match err { RpcInvalidTransactionError::Revert(revert) => { @@ -442,6 +463,13 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch, 
InvalidTransaction::TooManyBlobs { max, have } => Self::TooManyBlobs { max, have }, InvalidTransaction::BlobCreateTransaction => Self::BlobTransactionIsCreate, + InvalidTransaction::EofCrateShouldHaveToAddress => Self::EofCrateShouldHaveToAddress, + InvalidTransaction::AuthorizationListNotSupported => { + Self::AuthorizationListNotSupported + } + InvalidTransaction::AuthorizationListInvalidFields => { + Self::AuthorizationListInvalidFields + } #[cfg(feature = "optimism")] InvalidTransaction::DepositSystemTxPostRegolith => { Self::Optimism(OptimismInvalidTransactionError::DepositSystemTxPostRegolith) @@ -450,8 +478,6 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::HaltedDepositPostRegolith => { Self::Optimism(OptimismInvalidTransactionError::HaltedDepositPostRegolith) } - // TODO(EOF) - InvalidTransaction::EofCrateShouldHaveToAddress => todo!("EOF"), } } } @@ -575,7 +601,7 @@ pub enum RpcPoolError { Other(Box), } -impl From for ErrorObject<'static> { +impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(error: RpcPoolError) -> Self { match error { RpcPoolError::Invalid(err) => err.into(), @@ -650,7 +676,7 @@ pub enum SignError { /// Converts the evm [`ExecutionResult`] into a result where `Ok` variant is the output bytes if it /// is [`ExecutionResult::Success`]. -pub(crate) fn ensure_success(result: ExecutionResult) -> EthResult { +pub fn ensure_success(result: ExecutionResult) -> EthResult { match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { diff --git a/crates/rpc/rpc/src/eth/api/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs similarity index 98% rename from crates/rpc/rpc/src/eth/api/fee_history.rs rename to crates/rpc/rpc-eth-types/src/fee_history.rs index f6e025166abf..b0f36a83d17d 100644 --- a/crates/rpc/rpc/src/eth/api/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -1,27 +1,31 @@ //! 
Consist of types adjacent to the fee history cache and its configs -use crate::eth::{cache::EthStateCache, error::EthApiError}; +use std::{ + collections::{BTreeMap, VecDeque}, + fmt::Debug, + sync::{atomic::Ordering::SeqCst, Arc}, +}; + use futures::{ future::{Fuse, FusedFuture}, FutureExt, Stream, StreamExt, }; use metrics::atomics::AtomicU64; +use reth_chainspec::ChainSpec; use reth_primitives::{ basefee::calc_next_block_base_fee, eip4844::{calc_blob_gasprice, calculate_excess_blob_gas}, - ChainSpec, Receipt, SealedBlock, TransactionSigned, B256, + Receipt, SealedBlock, TransactionSigned, B256, }; use reth_provider::{BlockReaderIdExt, CanonStateNotification, ChainSpecProvider}; -use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; use reth_rpc_types::TxGasAndReward; use serde::{Deserialize, Serialize}; -use std::{ - collections::{BTreeMap, VecDeque}, - fmt::Debug, - sync::{atomic::Ordering::SeqCst, Arc}, -}; use tracing::trace; +use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; + +use super::{EthApiError, EthStateCache}; + /// Contains cached fee history entries for blocks. /// /// Purpose for this is to provide cached data for `eth_feeHistory`. @@ -164,7 +168,7 @@ impl FeeHistoryCache { } /// Settings for the [`FeeHistoryCache`]. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct FeeHistoryCacheConfig { /// Max number of blocks in cache. @@ -262,7 +266,7 @@ pub async fn fee_history_cache_new_blocks_task( /// the corresponding rewards for the transactions at each percentile. /// /// The results are returned as a vector of U256 values. 
-pub(crate) fn calculate_reward_percentiles_for_block( +pub fn calculate_reward_percentiles_for_block( percentiles: &[f64], gas_used: u64, base_fee_per_gas: u64, diff --git a/crates/rpc/rpc/src/eth/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs similarity index 92% rename from crates/rpc/rpc/src/eth/gas_oracle.rs rename to crates/rpc/rpc-eth-types/src/gas_oracle.rs index 40246ce92a91..af69249c93c6 100644 --- a/crates/rpc/rpc/src/eth/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -1,22 +1,30 @@ //! An implementation of the eth gas price oracle, used for providing gas price estimates based on //! previous blocks. -use crate::eth::{ - cache::EthStateCache, - error::{EthApiError, EthResult, RpcInvalidTransactionError}, -}; -use derive_more::{Deref, DerefMut}; +use std::fmt::{self, Debug, Formatter}; + +use derive_more::{Deref, DerefMut, From, Into}; use reth_primitives::{constants::GWEI_TO_WEI, BlockNumberOrTag, B256, U256}; use reth_provider::BlockReaderIdExt; -use reth_rpc_server_types::constants::gas_oracle::*; +use reth_rpc_server_types::constants; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; -use std::fmt::{self, Debug, Formatter}; use tokio::sync::Mutex; use tracing::warn; +use reth_rpc_server_types::constants::gas_oracle::{ + DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, + DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, +}; + +use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; + +/// The default gas limit for `eth_call` and adjacent calls. See +/// [`RPC_DEFAULT_GAS_CAP`](constants::gas_oracle::RPC_DEFAULT_GAS_CAP). 
+pub const RPC_DEFAULT_GAS_CAP: GasCap = GasCap(constants::gas_oracle::RPC_DEFAULT_GAS_CAP); + /// Settings for the [`GasPriceOracle`] -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GasPriceOracleConfig { /// The number of populated blocks to produce the gas price estimate @@ -73,7 +81,7 @@ pub struct GasPriceOracle { impl GasPriceOracle where - Provider: BlockReaderIdExt + 'static, + Provider: BlockReaderIdExt, { /// Creates and returns the [`GasPriceOracle`]. pub fn new( @@ -286,6 +294,16 @@ impl Default for GasPriceOracleResult { } } +/// The wrapper type for gas limit +#[derive(Debug, Clone, Copy, From, Into)] +pub struct GasCap(pub u64); + +impl Default for GasCap { + fn default() -> Self { + RPC_DEFAULT_GAS_CAP + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/rpc/src/eth/id_provider.rs b/crates/rpc/rpc-eth-types/src/id_provider.rs similarity index 84% rename from crates/rpc/rpc/src/eth/id_provider.rs rename to crates/rpc/rpc-eth-types/src/id_provider.rs index 9b330f5feca9..97375624ca62 100644 --- a/crates/rpc/rpc/src/eth/id_provider.rs +++ b/crates/rpc/rpc-eth-types/src/id_provider.rs @@ -1,14 +1,19 @@ -use jsonrpsee::types::SubscriptionId; +//! Helper type for `reth_rpc_eth_api::EthPubSubApiServer` implementation. +//! +//! Generates IDs for tracking subscriptions. + use std::fmt::Write; -/// An [`IdProvider`](jsonrpsee::core::traits::IdProvider) for ethereum subscription ids. +use jsonrpsee_types::SubscriptionId; + +/// An [`IdProvider`](jsonrpsee_core::traits::IdProvider) for ethereum subscription ids. 
/// /// Returns new hex-string [QUANTITY](https://ethereum.org/en/developers/docs/apis/json-rpc/#quantities-encoding) ids #[derive(Debug, Clone, Copy, Default)] #[non_exhaustive] pub struct EthSubscriptionIdProvider; -impl jsonrpsee::core::traits::IdProvider for EthSubscriptionIdProvider { +impl jsonrpsee_core::traits::IdProvider for EthSubscriptionIdProvider { fn next_id(&self) -> SubscriptionId<'static> { to_quantity(rand::random::()) } diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs new file mode 100644 index 000000000000..fb9901dd071c --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -0,0 +1,36 @@ +//! Reth RPC server types, used in server implementation of `eth` namespace API. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +pub mod cache; +pub mod error; +pub mod fee_history; +pub mod gas_oracle; +pub mod id_provider; +pub mod logs_utils; +pub mod pending_block; +pub mod receipt; +pub mod revm_utils; +pub mod transaction; +pub mod utils; + +pub use cache::{ + config::EthStateCacheConfig, db::StateCacheDb, multi_consumer::MultiConsumerLruCache, + EthStateCache, +}; +pub use error::{EthApiError, EthResult, RevertError, RpcInvalidTransactionError, SignError}; +pub use fee_history::{FeeHistoryCache, FeeHistoryCacheConfig, FeeHistoryEntry}; +pub use gas_oracle::{ + GasCap, GasPriceOracle, GasPriceOracleConfig, GasPriceOracleResult, RPC_DEFAULT_GAS_CAP, +}; +pub use id_provider::EthSubscriptionIdProvider; +pub use logs_utils::EthFilterError; +pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; +pub use receipt::ReceiptBuilder; +pub use 
transaction::TransactionSource; diff --git a/crates/rpc/rpc/src/eth/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs similarity index 78% rename from crates/rpc/rpc/src/eth/logs_utils.rs rename to crates/rpc/rpc-eth-types/src/logs_utils.rs index 838146aae448..2bedad16bb4f 100644 --- a/crates/rpc/rpc/src/eth/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -1,11 +1,66 @@ -use super::filter::FilterError; -use alloy_primitives::TxHash; -use reth_primitives::{BlockNumHash, ChainInfo, Receipt}; +//! Helper functions for `reth_rpc_eth_api::EthFilterApiServer` implementation. +//! +//! Log parsing for building filter. + +use reth_chainspec::ChainInfo; +use reth_primitives::{BlockNumHash, Receipt, TxHash}; use reth_provider::{BlockReader, ProviderError}; -use reth_rpc_types::{FilteredParams, Log}; +use reth_rpc_server_types::result::rpc_error_with_code; +use reth_rpc_types::{FilterId, FilteredParams, Log}; + +use crate::EthApiError; + +/// Errors that can occur in the handler implementation +#[derive(Debug, thiserror::Error)] +pub enum EthFilterError { + /// Filter not found. + #[error("filter not found")] + FilterNotFound(FilterId), + /// Invalid block range. + #[error("invalid block range params")] + InvalidBlockRangeParams, + /// Query scope is too broad. + #[error("query exceeds max block range {0}")] + QueryExceedsMaxBlocks(u64), + /// Query result is too large. + #[error("query exceeds max results {0}")] + QueryExceedsMaxResults(usize), + /// Error serving request in `eth_` namespace. + #[error(transparent)] + EthAPIError(#[from] EthApiError), + /// Error thrown when a spawned task failed to deliver a response. 
+ #[error("internal filter error")] + InternalError, +} + +// convert the error +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: EthFilterError) -> Self { + match err { + EthFilterError::FilterNotFound(_) => { + rpc_error_with_code(jsonrpsee_types::error::INVALID_PARAMS_CODE, "filter not found") + } + err @ EthFilterError::InternalError => { + rpc_error_with_code(jsonrpsee_types::error::INTERNAL_ERROR_CODE, err.to_string()) + } + EthFilterError::EthAPIError(err) => err.into(), + err @ EthFilterError::InvalidBlockRangeParams | + err @ EthFilterError::QueryExceedsMaxBlocks(_) | + err @ EthFilterError::QueryExceedsMaxResults(_) => { + rpc_error_with_code(jsonrpsee_types::error::INVALID_PARAMS_CODE, err.to_string()) + } + } + } +} + +impl From for EthFilterError { + fn from(err: ProviderError) -> Self { + Self::EthAPIError(err.into()) + } +} /// Returns all matching of a block's receipts when the transaction hashes are known. -pub(crate) fn matching_block_logs_with_tx_hashes<'a, I>( +pub fn matching_block_logs_with_tx_hashes<'a, I>( filter: &FilteredParams, block_num_hash: BlockNumHash, tx_hashes_and_receipts: I, @@ -42,7 +97,7 @@ where /// Appends all matching logs of a block's receipts. /// If the log matches, look up the corresponding transaction hash. -pub(crate) fn append_matching_block_logs( +pub fn append_matching_block_logs( all_logs: &mut Vec, provider: impl BlockReader, filter: &FilteredParams, @@ -50,7 +105,7 @@ pub(crate) fn append_matching_block_logs( receipts: &[Receipt], removed: bool, block_timestamp: u64, -) -> Result<(), FilterError> { +) -> Result<(), EthFilterError> { // Tracks the index of a log in the entire block. 
let mut log_index: u64 = 0; @@ -109,7 +164,7 @@ pub(crate) fn append_matching_block_logs( } /// Returns true if the log matches the filter and should be included -pub(crate) fn log_matches_filter( +pub fn log_matches_filter( block: BlockNumHash, log: &reth_primitives::Log, params: &FilteredParams, @@ -126,7 +181,7 @@ pub(crate) fn log_matches_filter( } /// Computes the block range based on the filter range and current block numbers -pub(crate) fn get_filter_block_range( +pub fn get_filter_block_range( from_block: Option, to_block: Option, start_block: u64, diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs new file mode 100644 index 000000000000..64dd2aeb59b4 --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -0,0 +1,123 @@ +//! Helper types for `reth_rpc_eth_api::EthApiServer` implementation. +//! +//! Types used in block building. + +use std::{fmt, time::Instant}; + +use derive_more::Constructor; +use reth_chainspec::ChainSpec; +use reth_primitives::{BlockId, BlockNumberOrTag, SealedBlockWithSenders, SealedHeader, B256}; +use reth_provider::ProviderError; +use reth_revm::state_change::apply_blockhashes_update; +use revm_primitives::{ + db::{Database, DatabaseCommit}, + BlockEnv, CfgEnvWithHandlerCfg, +}; + +use super::{EthApiError, EthResult}; + +/// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block +#[derive(Debug, Clone, Constructor)] +pub struct PendingBlockEnv { + /// Configured [`CfgEnvWithHandlerCfg`] for the pending block. + pub cfg: CfgEnvWithHandlerCfg, + /// Configured [`BlockEnv`] for the pending block. + pub block_env: BlockEnv, + /// Origin block for the config + pub origin: PendingBlockEnvOrigin, +} + +/// Apply the [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) pre block state transitions. +/// +/// This constructs a new [Evm](revm::Evm) with the given DB, and environment +/// [`CfgEnvWithHandlerCfg`] and [`BlockEnv`]. 
+/// +/// This uses [`apply_blockhashes_update`]. +pub fn pre_block_blockhashes_update + DatabaseCommit>( + db: &mut DB, + chain_spec: &ChainSpec, + initialized_block_env: &BlockEnv, + block_number: u64, + parent_block_hash: B256, +) -> EthResult<()> +where + DB::Error: fmt::Display, +{ + apply_blockhashes_update( + db, + chain_spec, + initialized_block_env.timestamp.to::(), + block_number, + parent_block_hash, + ) + .map_err(|err| EthApiError::Internal(err.into())) +} + +/// The origin for a configured [`PendingBlockEnv`] +#[derive(Clone, Debug)] +pub enum PendingBlockEnvOrigin { + /// The pending block as received from the CL. + ActualPending(SealedBlockWithSenders), + /// The _modified_ header of the latest block. + /// + /// This derives the pending state based on the latest header by modifying: + /// - the timestamp + /// - the block number + /// - fees + DerivedFromLatest(SealedHeader), +} + +impl PendingBlockEnvOrigin { + /// Returns true if the origin is the actual pending block as received from the CL. + pub const fn is_actual_pending(&self) -> bool { + matches!(self, Self::ActualPending(_)) + } + + /// Consumes the type and returns the actual pending block. + pub fn into_actual_pending(self) -> Option { + match self { + Self::ActualPending(block) => Some(block), + _ => None, + } + } + + /// Returns the [`BlockId`] that represents the state of the block. + /// + /// If this is the actual pending block, the state is the "Pending" tag, otherwise we can safely + /// identify the block by its hash (latest block). + pub fn state_block_id(&self) -> BlockId { + match self { + Self::ActualPending(_) => BlockNumberOrTag::Pending.into(), + Self::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), + } + } + + /// Returns the hash of the block the pending block should be built on. + /// + /// For the [`PendingBlockEnvOrigin::ActualPending`] this is the parent hash of the block. 
+ /// For the [`PendingBlockEnvOrigin::DerivedFromLatest`] this is the hash of the _latest_ + /// header. + pub fn build_target_hash(&self) -> B256 { + match self { + Self::ActualPending(block) => block.parent_hash, + Self::DerivedFromLatest(header) => header.hash(), + } + } + + /// Returns the header this pending block is based on. + pub fn header(&self) -> &SealedHeader { + match self { + Self::ActualPending(block) => &block.header, + Self::DerivedFromLatest(header) => header, + } + } +} + +/// In memory pending block for `pending` tag +#[derive(Debug, Constructor)] +pub struct PendingBlock { + /// The cached pending block + pub block: SealedBlockWithSenders, + /// Timestamp when the pending block is considered outdated + pub expires_at: Instant, +} diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs new file mode 100644 index 000000000000..cd3fd1ed514b --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -0,0 +1,126 @@ +//! RPC receipt response builder, extends a layer one receipt with layer two data. + +use reth_primitives::{Address, Receipt, TransactionMeta, TransactionSigned, TxKind}; +use reth_rpc_types::{ + AnyReceiptEnvelope, AnyTransactionReceipt, Log, OtherFields, ReceiptWithBloom, + TransactionReceipt, WithOtherFields, +}; +use revm_primitives::calc_blob_gasprice; + +use super::{EthApiError, EthResult}; + +/// Receipt response builder. +#[derive(Debug)] +pub struct ReceiptBuilder { + /// The base response body, contains L1 fields. + base: TransactionReceipt>, + /// Additional L2 fields. + other: OtherFields, +} + +impl ReceiptBuilder { + /// Returns a new builder with the base response body (L1 fields) set. + /// + /// Note: This requires _all_ block receipts because we need to calculate the gas used by the + /// transaction. 
+ pub fn new( + transaction: &TransactionSigned, + meta: TransactionMeta, + receipt: &Receipt, + all_receipts: &[Receipt], + ) -> EthResult { + // Note: we assume this transaction is valid, because it's mined (or part of pending block) + // and we don't need to check for pre EIP-2 + let from = transaction + .recover_signer_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature)?; + + // get the previous transaction cumulative gas used + let gas_used = if meta.index == 0 { + receipt.cumulative_gas_used + } else { + let prev_tx_idx = (meta.index - 1) as usize; + all_receipts + .get(prev_tx_idx) + .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) + .unwrap_or_default() + }; + + let blob_gas_used = transaction.transaction.blob_gas_used(); + // Blob gas price should only be present if the transaction is a blob transaction + let blob_gas_price = + blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); + let logs_bloom = receipt.bloom_slow(); + + // get number of logs in the block + let mut num_logs = 0; + for prev_receipt in all_receipts.iter().take(meta.index as usize) { + num_logs += prev_receipt.logs.len(); + } + + let mut logs = Vec::with_capacity(receipt.logs.len()); + for (tx_log_idx, log) in receipt.logs.iter().enumerate() { + let rpclog = Log { + inner: log.clone(), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + block_timestamp: Some(meta.timestamp), + transaction_hash: Some(meta.tx_hash), + transaction_index: Some(meta.index), + log_index: Some((num_logs + tx_log_idx) as u64), + removed: false, + }; + logs.push(rpclog); + } + + let rpc_receipt = reth_rpc_types::Receipt { + status: receipt.success.into(), + cumulative_gas_used: receipt.cumulative_gas_used as u128, + logs, + }; + + let (contract_address, to) = match transaction.transaction.kind() { + TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), + TxKind::Call(addr) => (None, 
Some(Address(*addr))), + }; + + #[allow(clippy::needless_update)] + let base = TransactionReceipt { + inner: AnyReceiptEnvelope { + inner: ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, + r#type: transaction.transaction.tx_type().into(), + }, + transaction_hash: meta.tx_hash, + transaction_index: Some(meta.index), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + from, + to, + gas_used: gas_used as u128, + contract_address, + effective_gas_price: transaction.effective_gas_price(meta.base_fee), + // TODO pre-byzantium receipts have a post-transaction state root + state_root: None, + // EIP-4844 fields + blob_gas_price, + blob_gas_used: blob_gas_used.map(u128::from), + }; + + Ok(Self { base, other: Default::default() }) + } + + /// Adds fields to response body. + pub fn add_other_fields(mut self, mut fields: OtherFields) -> Self { + self.other.append(&mut fields); + self + } + + /// Builds a receipt response from the base response body, and any set additional fields. + pub fn build(self) -> AnyTransactionReceipt { + let Self { base, other } = self; + let mut res = WithOtherFields::new(base); + res.other = other; + + res + } +} diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs similarity index 82% rename from crates/rpc/rpc/src/eth/revm_utils.rs rename to crates/rpc/rpc-eth-types/src/revm_utils.rs index fb008b8943b3..d1459309d9ff 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -1,16 +1,10 @@ //! 
utilities for working with revm -use crate::eth::error::{EthApiError, EthResult, RpcInvalidTransactionError}; -#[cfg(feature = "optimism")] -use reth_primitives::revm::env::fill_op_tx_env; -#[cfg(not(feature = "optimism"))] -use reth_primitives::revm::env::fill_tx_env; -use reth_primitives::{ - revm::env::fill_tx_env_with_recovered, Address, TransactionSigned, - TransactionSignedEcRecovered, TxHash, TxKind, B256, U256, -}; +use std::cmp::min; + +use reth_primitives::{Address, TxKind, B256, U256}; use reth_rpc_types::{ - state::{AccountOverride, StateOverride}, + state::{AccountOverride, EvmOverrides, StateOverride}, BlockOverrides, TransactionRequest, }; #[cfg(feature = "optimism")] @@ -19,103 +13,17 @@ use revm::{ db::CacheDB, precompile::{PrecompileSpecId, Precompiles}, primitives::{ - db::DatabaseRef, BlockEnv, Bytecode, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, - TransactTo, TxEnv, + db::DatabaseRef, BlockEnv, Bytecode, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv, }, Database, }; -use std::cmp::min; use tracing::trace; -/// Helper type that bundles various overrides for EVM Execution. -/// -/// By `Default`, no overrides are included. -#[derive(Debug, Clone, Default)] -pub struct EvmOverrides { - /// Applies overrides to the state before execution. - pub state: Option, - /// Applies overrides to the block before execution. - /// - /// This is a `Box` because less common and only available in debug trace endpoints. - pub block: Option>, -} - -impl EvmOverrides { - /// Creates a new instance with the given overrides - pub const fn new(state: Option, block: Option>) -> Self { - Self { state, block } - } - - /// Creates a new instance with the given state overrides. - pub const fn state(state: Option) -> Self { - Self { state, block: None } - } - - /// Returns `true` if the overrides contain state overrides. 
- pub const fn has_state(&self) -> bool { - self.state.is_some() - } -} - -impl From> for EvmOverrides { - fn from(state: Option) -> Self { - Self::state(state) - } -} - -/// Helper type to work with different transaction types when configuring the EVM env. -/// -/// This makes it easier to handle errors. -pub trait FillableTransaction { - /// Returns the hash of the transaction. - fn hash(&self) -> TxHash; - - /// Fill the transaction environment with the given transaction. - fn try_fill_tx_env(&self, tx_env: &mut TxEnv) -> EthResult<()>; -} - -impl FillableTransaction for TransactionSignedEcRecovered { - fn hash(&self) -> TxHash { - self.hash - } - - fn try_fill_tx_env(&self, tx_env: &mut TxEnv) -> EthResult<()> { - #[cfg(not(feature = "optimism"))] - fill_tx_env_with_recovered(tx_env, self); - - #[cfg(feature = "optimism")] - { - let mut envelope_buf = Vec::with_capacity(self.length_without_header()); - self.encode_enveloped(&mut envelope_buf); - fill_tx_env_with_recovered(tx_env, self, envelope_buf.into()); - } - Ok(()) - } -} -impl FillableTransaction for TransactionSigned { - fn hash(&self) -> TxHash { - self.hash - } - - fn try_fill_tx_env(&self, tx_env: &mut TxEnv) -> EthResult<()> { - let signer = - self.recover_signer().ok_or_else(|| EthApiError::InvalidTransactionSignature)?; - #[cfg(not(feature = "optimism"))] - fill_tx_env(tx_env, self, signer); - - #[cfg(feature = "optimism")] - { - let mut envelope_buf = Vec::with_capacity(self.length_without_header()); - self.encode_enveloped(&mut envelope_buf); - fill_op_tx_env(tx_env, self, signer, envelope_buf.into()); - } - Ok(()) - } -} +use super::{EthApiError, EthResult, RpcInvalidTransactionError}; /// Returns the addresses of the precompiles corresponding to the `SpecId`. 
#[inline] -pub(crate) fn get_precompiles(spec_id: SpecId) -> impl IntoIterator { +pub fn get_precompiles(spec_id: SpecId) -> impl IntoIterator { let spec = PrecompileSpecId::from_spec_id(spec_id); Precompiles::new(spec).addresses().copied().map(Address::from) } @@ -129,7 +37,7 @@ pub(crate) fn get_precompiles(spec_id: SpecId) -> impl IntoIterator( +pub fn prepare_call_env( mut cfg: CfgEnvWithHandlerCfg, mut block: BlockEnv, request: TransactionRequest, @@ -199,7 +107,7 @@ where /// `eth_call`. /// /// Note: this does _not_ access the Database to check the sender. -pub(crate) fn build_call_evm_env( +pub fn build_call_evm_env( cfg: CfgEnvWithHandlerCfg, block: BlockEnv, request: TransactionRequest, @@ -212,10 +120,7 @@ pub(crate) fn build_call_evm_env( /// /// All [`TxEnv`] fields are derived from the given [`TransactionRequest`], if fields are `None`, /// they fall back to the [`BlockEnv`]'s settings. -pub(crate) fn create_txn_env( - block_env: &BlockEnv, - request: TransactionRequest, -) -> EthResult { +pub fn create_txn_env(block_env: &BlockEnv, request: TransactionRequest) -> EthResult { // Ensure that if versioned hashes are set, they're not empty if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into()); @@ -250,26 +155,21 @@ pub(crate) fn create_txn_env( )?; let gas_limit = gas.unwrap_or_else(|| block_env.gas_limit.min(U256::from(u64::MAX)).to()); - let transact_to = match to { - Some(TxKind::Call(to)) => TransactTo::call(to), - _ => TransactTo::create(), - }; let env = TxEnv { gas_limit: gas_limit.try_into().map_err(|_| RpcInvalidTransactionError::GasUintOverflow)?, nonce, caller: from.unwrap_or_default(), gas_price, gas_priority_fee: max_priority_fee_per_gas, - transact_to, + transact_to: to.unwrap_or(TxKind::Create), value: value.unwrap_or_default(), data: input.try_into_unique_input()?.unwrap_or_default(), chain_id, - access_list: access_list - 
.map(reth_rpc_types::AccessList::into_flattened) - .unwrap_or_default(), + access_list: access_list.unwrap_or_default().into(), // EIP-4844 fields blob_hashes: blob_versioned_hashes.unwrap_or_default(), max_fee_per_blob_gas, + authorization_list: None, #[cfg(feature = "optimism")] optimism: OptimismFields { enveloped_tx: Some(Bytes::new()), ..Default::default() }, }; @@ -278,10 +178,7 @@ pub(crate) fn create_txn_env( } /// Caps the configured [`TxEnv`] `gas_limit` with the allowance of the caller. -pub(crate) fn cap_tx_gas_limit_with_caller_allowance( - db: &mut DB, - env: &mut TxEnv, -) -> EthResult<()> +pub fn cap_tx_gas_limit_with_caller_allowance(db: &mut DB, env: &mut TxEnv) -> EthResult<()> where DB: Database, EthApiError: From<::Error>, @@ -299,7 +196,7 @@ where /// /// Returns an error if the caller has insufficient funds. /// Caution: This assumes non-zero `env.gas_price`. Otherwise, zero allowance will be returned. -pub(crate) fn caller_gas_allowance(db: &mut DB, env: &TxEnv) -> EthResult +pub fn caller_gas_allowance(db: &mut DB, env: &TxEnv) -> EthResult where DB: Database, EthApiError: From<::Error>, @@ -321,7 +218,8 @@ where } /// Helper type for representing the fees of a [`TransactionRequest`] -pub(crate) struct CallFees { +#[derive(Debug)] +pub struct CallFees { /// EIP-1559 priority fee max_priority_fee_per_gas: Option, /// Unified gas price setting @@ -484,10 +382,7 @@ fn apply_block_overrides(overrides: BlockOverrides, env: &mut BlockEnv) { } /// Applies the given state overrides (a set of [`AccountOverride`]) to the [`CacheDB`]. 
-pub(crate) fn apply_state_overrides( - overrides: StateOverride, - db: &mut CacheDB, -) -> EthResult<()> +pub fn apply_state_overrides(overrides: StateOverride, db: &mut CacheDB) -> EthResult<()> where DB: DatabaseRef, EthApiError: From<::Error>, @@ -559,9 +454,8 @@ where #[cfg(test)] mod tests { - use reth_primitives::constants::GWEI_TO_WEI; - use super::*; + use reth_primitives::constants::GWEI_TO_WEI; #[test] fn test_ensure_0_fallback() { diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs new file mode 100644 index 000000000000..32c81d3966f8 --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -0,0 +1,96 @@ +//! Helper types for `reth_rpc_eth_api::EthApiServer` implementation. +//! +//! Transaction wrapper that labels transaction with its origin. + +use reth_primitives::{TransactionSignedEcRecovered, B256}; +use reth_rpc_types::{Transaction, TransactionInfo}; +use reth_rpc_types_compat::transaction::from_recovered_with_block_context; + +/// Represents from where a transaction was fetched. +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum TransactionSource { + /// Transaction exists in the pool (Pending) + Pool(TransactionSignedEcRecovered), + /// Transaction already included in a block + /// + /// This can be a historical block or a pending block (received from the CL) + Block { + /// Transaction fetched via provider + transaction: TransactionSignedEcRecovered, + /// Index of the transaction in the block + index: u64, + /// Hash of the block. + block_hash: B256, + /// Number of the block. + block_number: u64, + /// base fee of the block. + base_fee: Option, + }, +} + +// === impl TransactionSource === + +impl TransactionSource { + /// Consumes the type and returns the wrapped transaction. 
+ pub fn into_recovered(self) -> TransactionSignedEcRecovered { + self.into() + } + + /// Returns the transaction and block related info, if not pending + pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { + match self { + Self::Pool(tx) => { + let hash = tx.hash(); + ( + tx, + TransactionInfo { + hash: Some(hash), + index: None, + block_hash: None, + block_number: None, + base_fee: None, + }, + ) + } + Self::Block { transaction, index, block_hash, block_number, base_fee } => { + let hash = transaction.hash(); + ( + transaction, + TransactionInfo { + hash: Some(hash), + index: Some(index), + block_hash: Some(block_hash), + block_number: Some(block_number), + base_fee: base_fee.map(u128::from), + }, + ) + } + } + } +} + +impl From for TransactionSignedEcRecovered { + fn from(value: TransactionSource) -> Self { + match value { + TransactionSource::Pool(tx) => tx, + TransactionSource::Block { transaction, .. } => transaction, + } + } +} + +impl From for Transaction { + fn from(value: TransactionSource) -> Self { + match value { + TransactionSource::Pool(tx) => reth_rpc_types_compat::transaction::from_recovered(tx), + TransactionSource::Block { transaction, index, block_hash, block_number, base_fee } => { + from_recovered_with_block_context( + transaction, + block_hash, + block_number, + base_fee, + index as usize, + ) + } + } + } +} diff --git a/crates/rpc/rpc/src/eth/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs similarity index 79% rename from crates/rpc/rpc/src/eth/utils.rs rename to crates/rpc/rpc-eth-types/src/utils.rs index c780ec4a6f81..783bb55caba1 100644 --- a/crates/rpc/rpc/src/eth/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,14 +1,13 @@ //! 
Commonly used code snippets -use crate::eth::error::{EthApiError, EthResult}; use reth_primitives::{Bytes, PooledTransactionsElement, PooledTransactionsElementEcRecovered}; +use super::{EthApiError, EthResult}; + /// Recovers a [`PooledTransactionsElementEcRecovered`] from an enveloped encoded byte stream. /// /// See [`PooledTransactionsElement::decode_enveloped`] -pub(crate) fn recover_raw_transaction( - data: Bytes, -) -> EthResult { +pub fn recover_raw_transaction(data: Bytes) -> EthResult { if data.is_empty() { return Err(EthApiError::EmptyRawTransactionData); } diff --git a/crates/rpc/rpc-layer/src/auth_layer.rs b/crates/rpc/rpc-layer/src/auth_layer.rs index 0a11ae8024f6..255273194a37 100644 --- a/crates/rpc/rpc-layer/src/auth_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_layer.rs @@ -232,7 +232,7 @@ mod tests { let body = r#"{"jsonrpc": "2.0", "method": "greet_melkor", "params": [], "id": 1}"#; let response = client - .post(&format!("http://{AUTH_ADDR}:{AUTH_PORT}")) + .post(format!("http://{AUTH_ADDR}:{AUTH_PORT}")) .bearer_auth(jwt.unwrap_or_default()) .body(body) .header(header::CONTENT_TYPE, "application/json") diff --git a/crates/rpc/rpc-server-types/Cargo.toml b/crates/rpc/rpc-server-types/Cargo.toml index ddecc0a490ca..628654ebaf98 100644 --- a/crates/rpc/rpc-server-types/Cargo.toml +++ b/crates/rpc/rpc-server-types/Cargo.toml @@ -12,9 +12,20 @@ description = "RPC server types and constants" workspace = true [dependencies] +reth-errors.workspace = true +reth-network-api.workspace = true +reth-primitives.workspace = true +reth-rpc-types.workspace = true + + # ethereum alloy-primitives.workspace = true +# rpc +jsonrpsee-core.workspace = true +jsonrpsee-types.workspace = true + # misc strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } + diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 3784d7508ff2..e3c129bf6e28 100644 --- 
a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -26,6 +26,9 @@ pub fn default_max_tracing_requests() -> usize { .map_or(25, |cpus| max(cpus.get().saturating_sub(RESERVED), RESERVED)) } +/// The default number of getproof calls we are allowing to run concurrently. +pub const DEFAULT_PROOF_PERMITS: usize = 25; + /// The default IPC endpoint #[cfg(windows)] pub const DEFAULT_IPC_ENDPOINT: &str = r"\\.\pipe\reth.ipc"; @@ -42,6 +45,12 @@ pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = r"\\.\pipe\reth_engine_api.ipc #[cfg(not(windows))] pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = "/tmp/reth_engine_api.ipc"; +/// The default eth historical proof window. +pub const DEFAULT_ETH_PROOF_WINDOW: u64 = 0; + +/// Maximum eth historical proof window. Equivalent to roughly one month of data. +pub const MAX_ETH_PROOF_WINDOW: u64 = 216_000; + /// GPO specific constants pub mod gas_oracle { use alloy_primitives::U256; @@ -64,6 +73,20 @@ pub mod gas_oracle { /// The default minimum gas price, under which the sample will be ignored pub const DEFAULT_IGNORE_GAS_PRICE: U256 = U256::from_limbs([2u64, 0, 0, 0]); + + /// The default gas limit for `eth_call` and adjacent calls. + /// + /// This is different from the default to regular 30M block gas limit + /// [`ETHEREUM_BLOCK_GAS_LIMIT`](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow + /// for more complex calls. + pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000; + + /// Gas per transaction not creating a contract. 
+ pub const MIN_TRANSACTION_GAS: u64 = 21_000u64; + /// Allowed error ratio for gas estimation + /// Taken from Geth's implementation in order to pass the hive tests + /// + pub const ESTIMATE_GAS_ERROR_RATIO: f64 = 0.015; } /// Cache specific constants diff --git a/crates/rpc/rpc-server-types/src/lib.rs b/crates/rpc/rpc-server-types/src/lib.rs index 4bdee53f83cb..c20b578816b8 100644 --- a/crates/rpc/rpc-server-types/src/lib.rs +++ b/crates/rpc/rpc-server-types/src/lib.rs @@ -10,6 +10,9 @@ /// Common RPC constants. pub mod constants; +pub mod result; mod module; pub use module::{RethRpcModule, RpcModuleSelection}; + +pub use result::ToRpcResult; diff --git a/crates/rpc/rpc/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs similarity index 77% rename from crates/rpc/rpc/src/result.rs rename to crates/rpc/rpc-server-types/src/result.rs index f00c9e279939..252c78f241b2 100644 --- a/crates/rpc/rpc/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -1,9 +1,10 @@ //! Additional helpers for converting errors. -use jsonrpsee::core::RpcResult; -use reth_rpc_types::engine::PayloadError; use std::fmt::Display; +use jsonrpsee_core::RpcResult; +use reth_rpc_types::engine::PayloadError; + /// Helper trait to easily convert various `Result` types into [`RpcResult`] pub trait ToRpcResult: Sized { /// Converts the error of the [Result] to an [`RpcResult`] via the `Err` [Display] impl. @@ -21,14 +22,14 @@ pub trait ToRpcResult: Sized { M: Into; /// Converts this type into an [`RpcResult`] with the - /// [`jsonrpsee::types::error::INTERNAL_ERROR_CODE` and the given message. + /// [`jsonrpsee_types::error::INTERNAL_ERROR_CODE`] and the given message. fn map_internal_err(self, op: F) -> RpcResult where F: FnOnce(Err) -> M, M: Into; /// Converts this type into an [`RpcResult`] with the - /// [`jsonrpsee::types::error::INTERNAL_ERROR_CODE`] and given message and data. + /// [`jsonrpsee_types::error::INTERNAL_ERROR_CODE`] and given message and data. 
fn map_internal_err_with_data<'a, F, M>(self, op: F) -> RpcResult where F: FnOnce(Err) -> (M, &'a [u8]), @@ -46,7 +47,7 @@ macro_rules! impl_to_rpc_result { ($err:ty) => { impl ToRpcResult for Result { #[inline] - fn map_rpc_err<'a, F, M>(self, op: F) -> jsonrpsee::core::RpcResult + fn map_rpc_err<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult where F: FnOnce($err) -> (i32, M, Option<&'a [u8]>), M: Into, @@ -61,7 +62,7 @@ macro_rules! impl_to_rpc_result { } #[inline] - fn map_internal_err<'a, F, M>(self, op: F) -> jsonrpsee::core::RpcResult + fn map_internal_err<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult where F: FnOnce($err) -> M, M: Into, @@ -70,7 +71,7 @@ macro_rules! impl_to_rpc_result { } #[inline] - fn map_internal_err_with_data<'a, F, M>(self, op: F) -> jsonrpsee::core::RpcResult + fn map_internal_err_with_data<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult where F: FnOnce($err) -> (M, &'a [u8]), M: Into, @@ -85,7 +86,7 @@ macro_rules! impl_to_rpc_result { } #[inline] - fn with_message(self, msg: &str) -> jsonrpsee::core::RpcResult { + fn with_message(self, msg: &str) -> jsonrpsee_core::RpcResult { match self { Ok(t) => Ok(t), Err(err) => { @@ -104,46 +105,44 @@ impl_to_rpc_result!(reth_errors::ProviderError); impl_to_rpc_result!(reth_network_api::NetworkError); /// Constructs an invalid params JSON-RPC error. -pub(crate) fn invalid_params_rpc_err( +pub fn invalid_params_rpc_err( msg: impl Into, -) -> jsonrpsee::types::error::ErrorObject<'static> { - rpc_err(jsonrpsee::types::error::INVALID_PARAMS_CODE, msg, None) +) -> jsonrpsee_types::error::ErrorObject<'static> { + rpc_err(jsonrpsee_types::error::INVALID_PARAMS_CODE, msg, None) } /// Constructs an internal JSON-RPC error. 
-pub(crate) fn internal_rpc_err( - msg: impl Into, -) -> jsonrpsee::types::error::ErrorObject<'static> { - rpc_err(jsonrpsee::types::error::INTERNAL_ERROR_CODE, msg, None) +pub fn internal_rpc_err(msg: impl Into) -> jsonrpsee_types::error::ErrorObject<'static> { + rpc_err(jsonrpsee_types::error::INTERNAL_ERROR_CODE, msg, None) } /// Constructs an internal JSON-RPC error with data -pub(crate) fn internal_rpc_err_with_data( +pub fn internal_rpc_err_with_data( msg: impl Into, data: &[u8], -) -> jsonrpsee::types::error::ErrorObject<'static> { - rpc_err(jsonrpsee::types::error::INTERNAL_ERROR_CODE, msg, Some(data)) +) -> jsonrpsee_types::error::ErrorObject<'static> { + rpc_err(jsonrpsee_types::error::INTERNAL_ERROR_CODE, msg, Some(data)) } /// Constructs an internal JSON-RPC error with code and message -pub(crate) fn rpc_error_with_code( +pub fn rpc_error_with_code( code: i32, msg: impl Into, -) -> jsonrpsee::types::error::ErrorObject<'static> { +) -> jsonrpsee_types::error::ErrorObject<'static> { rpc_err(code, msg, None) } /// Constructs a JSON-RPC error, consisting of `code`, `message` and optional `data`. 
-pub(crate) fn rpc_err( +pub fn rpc_err( code: i32, msg: impl Into, data: Option<&[u8]>, -) -> jsonrpsee::types::error::ErrorObject<'static> { - jsonrpsee::types::error::ErrorObject::owned( +) -> jsonrpsee_types::error::ErrorObject<'static> { + jsonrpsee_types::error::ErrorObject::owned( code, msg.into(), data.map(|data| { - jsonrpsee::core::to_json_raw_value(&reth_primitives::hex::encode_prefixed(data)) + jsonrpsee_core::to_json_raw_value(&reth_primitives::hex::encode_prefixed(data)) .expect("serializing String can't fail") }), ) diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index b14451969cec..8ab37d1b18d0 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -24,10 +24,9 @@ futures.workspace = true jsonrpsee = { workspace = true, features = ["client", "async-client"] } serde_json.workspace = true - # assertions similar-asserts.workspace = true - [dev-dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "macros", "rt"] } +reth-rpc-eth-api.workspace = true diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index 92dbde21d9f0..9c408dc014c6 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,7 +1,7 @@ use futures::StreamExt; use jsonrpsee::http_client::HttpClientBuilder; -use reth_rpc_api::EthApiClient; use reth_rpc_api_testing_util::{debug::DebugApiExt, trace::TraceApiExt, utils::parse_env_url}; +use reth_rpc_eth_api::EthApiClient; use reth_rpc_types::trace::{ filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest, }; diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index fa21f15c5544..a589ef418c64 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] reth-primitives.workspace = true 
reth-rpc-types.workspace = true +reth-trie-common.workspace = true alloy-rlp.workspace = true alloy-rpc-types.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 0627c9e32d68..079c5f659072 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -2,14 +2,14 @@ //! Ethereum's Engine use reth_primitives::{ - constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE, MIN_PROTOCOL_BASE_FEE_U256}, + constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE}, proofs::{self}, Block, Header, Request, SealedBlock, TransactionSigned, UintTryTo, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, - ExecutionPayloadV4, PayloadError, + ExecutionPayload, ExecutionPayloadBodyV2, ExecutionPayloadV1, ExecutionPayloadV2, + ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, }; /// Converts [`ExecutionPayloadV1`] to [Block] @@ -18,8 +18,8 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Result Result { - let ExecutionPayloadV4 { payload_inner, deposit_requests, withdrawal_requests } = payload; + let ExecutionPayloadV4 { + payload_inner, + deposit_requests, + withdrawal_requests, + consolidation_requests, + } = payload; let mut block = try_payload_v3_to_block(payload_inner)?; // attach requests with asc type identifiers @@ -105,6 +110,7 @@ pub fn try_payload_v4_to_block(payload: ExecutionPayloadV4) -> Result>(); let requests_root = proofs::calculate_requests_root(&requests); @@ -211,10 +217,10 @@ pub fn block_to_payload_v3(value: SealedBlock) -> (ExecutionPayloadV3, Option ExecutionPayloadV4 { - let (deposit_requests, withdrawal_requests) = + let (deposit_requests, withdrawal_requests, consolidation_requests) = 
value.requests.take().unwrap_or_default().into_iter().fold( - (Vec::new(), Vec::new()), - |(mut deposits, mut withdrawals), request| { + (Vec::new(), Vec::new(), Vec::new()), + |(mut deposits, mut withdrawals, mut consolidation_requests), request| { match request { Request::DepositRequest(r) => { deposits.push(r); @@ -222,16 +228,20 @@ pub fn block_to_payload_v4(mut value: SealedBlock) -> ExecutionPayloadV4 { Request::WithdrawalRequest(r) => { withdrawals.push(r); } + Request::ConsolidationRequest(r) => { + consolidation_requests.push(r); + } _ => {} }; - (deposits, withdrawals) + (deposits, withdrawals, consolidation_requests) }, ); ExecutionPayloadV4 { deposit_requests, withdrawal_requests, + consolidation_requests, payload_inner: block_to_payload_v3(value).0, } } @@ -368,6 +378,52 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { } } +/// Converts [Block] to [`ExecutionPayloadBodyV2`] +pub fn convert_to_payload_body_v2(value: Block) -> ExecutionPayloadBodyV2 { + let transactions = value.body.into_iter().map(|tx| { + let mut out = Vec::new(); + tx.encode_enveloped(&mut out); + out.into() + }); + + let mut payload = ExecutionPayloadBodyV2 { + transactions: transactions.collect(), + withdrawals: value.withdrawals.map(Withdrawals::into_inner), + deposit_requests: None, + withdrawal_requests: None, + consolidation_requests: None, + }; + + if let Some(requests) = value.requests { + let (deposit_requests, withdrawal_requests, consolidation_requests) = + requests.into_iter().fold( + (Vec::new(), Vec::new(), Vec::new()), + |(mut deposits, mut withdrawals, mut consolidation_requests), request| { + match request { + Request::DepositRequest(r) => { + deposits.push(r); + } + Request::WithdrawalRequest(r) => { + withdrawals.push(r); + } + Request::ConsolidationRequest(r) => { + consolidation_requests.push(r); + } + _ => {} + }; + + (deposits, withdrawals, consolidation_requests) + }, + ); + + payload.deposit_requests = Some(deposit_requests); + 
payload.withdrawal_requests = Some(withdrawal_requests); + payload.consolidation_requests = Some(consolidation_requests); + } + + payload +} + /// Transforms a [`SealedBlock`] into a [`ExecutionPayloadV1`] pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); @@ -661,7 +717,8 @@ mod tests { "0x02f9021e8330182401843b9aca0085174876e80083030d40944242424242424242424242424242424242424242893635c9adc5dea00000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000120d694d6a0b0103651aafd87db6c88297175d7317c6e6da53ccf706c3c991c91fd0000000000000000000000000000000000000000000000000000000000000030b0b1b3b51cf688ead965a954c5cc206ba4e76f3f8efac60656ae708a9aad63487a2ca1fb30ccaf2ebe1028a2b2886b1b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020002d2b75f4a27f78e585a4735a40ab2437eceb12ec39938a94dc785a54d625130000000000000000000000000000000000000000000000000000000000000060b9759766e9bb191b1c457ae1da6cdf71a23fb9d8bc9f845eaa49ee4af280b3b9720ac4d81e64b1b50a65db7b8b4e76f1176a12e19d293d75574600e99fbdfecc1ab48edaeeffb3226cd47691d24473821dad0c6ff3973f03e4aa89f418933a56c080a099dc5b94a51e9b91a6425b1fed9792863006496ab71a4178524819d7db0c5e88a0119748e62700234079d91ae80f4676f9e0f71b260e9b46ef9b4aff331d3c2318" ], "withdrawalRequests": [], - "withdrawals": [] + "withdrawals": [], + "consolidationRequests": [] }"#; let payload = serde_json::from_str::(s).unwrap(); diff --git a/crates/rpc/rpc-types-compat/src/proof.rs b/crates/rpc/rpc-types-compat/src/proof.rs index 17e5cc193633..9d2fec876b0c 100644 --- a/crates/rpc/rpc-types-compat/src/proof.rs +++ b/crates/rpc/rpc-types-compat/src/proof.rs @@ -1,12 +1,10 @@ //! Compatibility functions for rpc proof related types. 
-use reth_primitives::{ - proofs::{AccountProof, StorageProof}, - U64, -}; +use reth_primitives::U64; use reth_rpc_types::{ serde_helpers::JsonStorageKey, EIP1186AccountProofResponse, EIP1186StorageProof, }; +use reth_trie_common::{AccountProof, StorageProof}; /// Creates a new rpc storage proof from a primitive storage proof type. pub fn from_primitive_storage_proof(proof: StorageProof) -> EIP1186StorageProof { diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 7ef01886850d..fa5c8b79c9ae 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -16,21 +16,19 @@ workspace = true # ethereum alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde"] } alloy-rpc-types = { workspace = true, features = ["jsonrpsee-types"] } +alloy-rpc-types-admin.workspace = true alloy-rpc-types-anvil.workspace = true -alloy-rpc-types-trace.workspace = true alloy-rpc-types-beacon.workspace = true +alloy-rpc-types-mev.workspace = true +alloy-rpc-types-trace.workspace = true +alloy-rpc-types-txpool.workspace = true +alloy-serde.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } # misc serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true jsonrpsee-types = { workspace = true, optional = true } -[features] -default = ["jsonrpsee-types"] -arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] - - [dev-dependencies] # misc alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde", "arbitrary"] } @@ -40,4 +38,8 @@ proptest-derive.workspace = true rand.workspace = true similar-asserts.workspace = true bytes.workspace = true +serde_json.workspace = true +[features] +default = ["jsonrpsee-types"] +arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] \ No newline at end of file diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 442f00769ec8..7f578ab29400 100644 --- 
a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -9,10 +9,8 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] - +#[allow(hidden_glob_reexports)] mod eth; -mod mev; -mod net; mod peer; mod rpc; @@ -22,17 +20,29 @@ pub use alloy_rpc_types::serde_helpers; // Ethereum specific rpc types coming from alloy. pub use alloy_rpc_types::*; +// Ethereum specific serde types coming from alloy. +pub use alloy_serde::*; + pub mod trace { //! RPC types for trace endpoints and inspectors. pub use alloy_rpc_types_trace::*; } +// re-export admin +pub use alloy_rpc_types_admin as admin; + // Anvil specific rpc types coming from alloy. pub use alloy_rpc_types_anvil as anvil; +// re-export mev +pub use alloy_rpc_types_mev as mev; + // re-export beacon pub use alloy_rpc_types_beacon as beacon; +// re-export txpool +pub use alloy_rpc_types_txpool as txpool; + // Ethereum specific rpc types related to typed transaction requests and the engine API. pub use eth::{ engine, @@ -43,7 +53,5 @@ pub use eth::{ transaction::{self, TransactionRequest, TypedTransactionRequest}, }; -pub use mev::*; -pub use net::*; pub use peer::*; pub use rpc::*; diff --git a/crates/rpc/rpc-types/src/net.rs b/crates/rpc/rpc-types/src/net.rs index b434bcbf8493..eb77ac7922d4 100644 --- a/crates/rpc/rpc-types/src/net.rs +++ b/crates/rpc/rpc-types/src/net.rs @@ -1,4 +1,4 @@ -use alloy_rpc_types::admin::EthProtocolInfo; +use alloy_rpc_types_admin::EthProtocolInfo; use serde::{Deserialize, Serialize}; /// The status of the network being ran by the local node. 
diff --git a/crates/rpc/rpc-types/src/rpc.rs b/crates/rpc/rpc-types/src/rpc.rs index bb5ae5d77fbb..0b9afeb79a67 100644 --- a/crates/rpc/rpc-types/src/rpc.rs +++ b/crates/rpc/rpc-types/src/rpc.rs @@ -24,6 +24,7 @@ impl RpcModules { #[cfg(test)] mod tests { use super::*; + #[test] fn test_parse_module_versions_roundtrip() { let s = r#"{"txpool":"1.0","trace":"1.0","eth":"1.0","web3":"1.0","net":"1.0"}"#; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 0bf6c8bc5c17..256fc156bad4 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -13,13 +13,14 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-rpc-api.workspace = true -reth-rpc-server-types.workspace = true +reth-rpc-eth-api.workspace = true reth-rpc-types.workspace = true reth-errors.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } -reth-transaction-pool = { workspace = true, features = ["test-utils"] } +reth-provider.workspace = true +reth-transaction-pool.workspace = true reth-network-api.workspace = true reth-rpc-engine-api.workspace = true reth-revm.workspace = true @@ -27,62 +28,55 @@ reth-tasks = { workspace = true, features = ["rayon"] } reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true revm-inspectors = { workspace = true, features = ["js-tracer"] } -reth-evm.workspace = true reth-network-peers.workspace = true - +reth-evm.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true reth-evm-optimism = { workspace = true, optional = true } # eth +alloy-dyn-abi.workspace = true alloy-rlp.workspace = true -alloy-dyn-abi = { workspace = true, features = ["eip712"] } alloy-primitives.workspace = true -alloy-sol-types.workspace = true +alloy-genesis.workspace = true revm = { workspace = true, features = [ "optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", ] } revm-primitives = { workspace = 
true, features = ["serde"] } +secp256k1.workspace = true # rpc jsonrpsee.workspace = true http.workspace = true http-body.workspace = true hyper.workspace = true -jsonwebtoken = "8" +jsonwebtoken.workspace = true +serde_json.workspace = true +jsonrpsee-types = { workspace = true, optional = true } # async async-trait.workspace = true tokio = { workspace = true, features = ["sync"] } +tokio-stream.workspace = true tower.workspace = true -tokio-stream = { workspace = true, features = ["sync"] } pin-project.workspace = true parking_lot.workspace = true -# metrics -reth-metrics.workspace = true -metrics.workspace = true - # misc -secp256k1 = { workspace = true, features = [ - "global-context", - "rand-std", - "recovery", -] } -serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true -thiserror.workspace = true -rand.workspace = true tracing.workspace = true tracing-futures = "0.2" -schnellru.workspace = true futures.workspace = true +rand.workspace = true +serde.workspace = true +thiserror.workspace = true derive_more.workspace = true -dyn-clone.workspace = true [dev-dependencies] reth-evm-ethereum.workspace = true reth-testing-utils.workspace = true +jsonrpsee-types.workspace = true jsonrpsee = { workspace = true, features = ["client"] } assert_matches.workspace = true @@ -93,6 +87,10 @@ optimism = [ "reth-primitives/optimism", "reth-rpc-types-compat/optimism", "reth-provider/optimism", -# "dep:reth-evm-optimism", - "reth-evm-optimism/optimism", + "reth-rpc-api/optimism", + "reth-rpc-eth-api/optimism", + "reth-revm/optimism", + "jsonrpsee-types", + "reth-evm-optimism", + "reth-rpc-eth-types/optimism", ] diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index b1b0ca70580c..1d59baa6e274 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -1,16 +1,18 @@ -use crate::result::ToRpcResult; +use std::sync::Arc; + +use alloy_genesis::ChainConfig; use alloy_primitives::B256; use async_trait::async_trait; use 
jsonrpsee::core::RpcResult; +use reth_chainspec::ChainSpec; use reth_network_api::{NetworkInfo, PeerKind, Peers}; -use reth_network_peers::AnyNode; -use reth_primitives::{ChainSpec, NodeRecord}; +use reth_network_peers::{id2pk, AnyNode, NodeRecord}; use reth_rpc_api::AdminApiServer; -use reth_rpc_types::{ - admin::{EthProtocolInfo, NodeInfo, Ports, ProtocolInfo}, - PeerEthProtocolInfo, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, +use reth_rpc_server_types::ToRpcResult; +use reth_rpc_types::admin::{ + EthInfo, EthPeerInfo, EthProtocolInfo, NodeInfo, PeerInfo, PeerNetworkInfo, PeerProtocolInfo, + Ports, ProtocolInfo, }; -use std::sync::Arc; /// `admin` API implementation. /// @@ -24,7 +26,7 @@ pub struct AdminApi { impl AdminApi { /// Creates a new instance of `AdminApi`. - pub fn new(network: N, chain_spec: Arc) -> Self { + pub const fn new(network: N, chain_spec: Arc) -> Self { Self { network, chain_spec } } } @@ -36,7 +38,7 @@ where { /// Handler for `admin_addPeer` fn add_peer(&self, record: NodeRecord) -> RpcResult { - self.network.add_peer(record.id, record.tcp_addr()); + self.network.add_peer_with_udp(record.id, record.tcp_addr(), record.udp_addr()); Ok(true) } @@ -49,7 +51,7 @@ where /// Handler for `admin_addTrustedPeer` fn add_trusted_peer(&self, record: AnyNode) -> RpcResult { if let Some(record) = record.node_record() { - self.network.add_trusted_peer(record.id, record.tcp_addr()) + self.network.add_trusted_peer_with_udp(record.id, record.tcp_addr(), record.udp_addr()) } self.network.add_trusted_peer_id(record.peer_id()); Ok(true) @@ -61,40 +63,57 @@ where Ok(true) } + /// Handler for `admin_peers` async fn peers(&self) -> RpcResult> { let peers = self.network.get_all_peers().await.to_rpc_result()?; - let peers = peers - .into_iter() - .map(|peer| PeerInfo { - id: Some(peer.remote_id.to_string()), - name: peer.client_version.to_string(), - caps: peer.capabilities.capabilities().iter().map(|cap| cap.to_string()).collect(), - network: PeerNetworkInfo { - 
remote_address: peer.remote_addr.to_string(), - local_address: peer - .local_addr - .unwrap_or_else(|| self.network.local_addr()) - .to_string(), - }, - protocols: PeerProtocolsInfo { - eth: Some(PeerEthProtocolInfo { - difficulty: Some(peer.status.total_difficulty), - head: peer.status.blockhash.to_string(), - version: peer.status.version as u32, - }), - pip: None, - }, - }) - .collect(); + let mut infos = Vec::with_capacity(peers.len()); - Ok(peers) + for peer in peers { + if let Ok(pk) = id2pk(peer.remote_id) { + infos.push(PeerInfo { + id: pk.to_string(), + name: peer.client_version.to_string(), + enode: peer.enode, + enr: peer.enr, + caps: peer + .capabilities + .capabilities() + .iter() + .map(|cap| cap.to_string()) + .collect(), + network: PeerNetworkInfo { + remote_address: peer.remote_addr, + local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), + inbound: peer.direction.is_incoming(), + trusted: peer.kind.is_trusted(), + static_node: peer.kind.is_static(), + }, + protocols: PeerProtocolInfo { + eth: Some(EthPeerInfo::Info(EthInfo { + version: peer.status.version as u64, + })), + snap: None, + other: Default::default(), + }, + }) + } + } + + Ok(infos) } /// Handler for `admin_nodeInfo` async fn node_info(&self) -> RpcResult { let enode = self.network.local_node_record(); let status = self.network.network_status().await.to_rpc_result()?; - let config = self.chain_spec.genesis().config.clone(); + let config = ChainConfig { + chain_id: self.chain_spec.chain.id(), + terminal_total_difficulty_passed: self + .chain_spec + .get_final_paris_total_difficulty() + .is_some(), + ..self.chain_spec.genesis().config.clone() + }; let node_info = NodeInfo { id: B256::from_slice(&enode.id.as_slice()[..32]), diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 18d0c9a4b790..26c7a21f7abf 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,25 +1,24 @@ -use crate::{ - eth::{ - error::{EthApiError, 
EthResult}, - revm_utils::{prepare_call_env, EvmOverrides}, - EthTransactions, - }, - result::{internal_rpc_err, ToRpcResult}, - EthApiSpec, -}; +use std::sync::Arc; + use alloy_rlp::{Decodable, Encodable}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; +use reth_chainspec::EthereumHardforks; +use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - revm::env::tx_env_with_recovered, Address, Block, BlockId, BlockNumberOrTag, Bytes, - TransactionSignedEcRecovered, Withdrawals, B256, U256, + Address, Block, BlockId, BlockNumberOrTag, Bytes, TransactionSignedEcRecovered, B256, U256, }; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProviderBox, TransactionVariant, + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProviderFactory, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; +use reth_rpc_eth_api::helpers::{Call, EthApiSpec, EthTransactions, TraceExt}; +use reth_rpc_eth_types::{revm_utils::prepare_call_env, EthApiError, EthResult, StateCacheDb}; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_rpc_types::{ + state::EvmOverrides, trace::geth::{ BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, NoopFrame, TraceResult, @@ -35,7 +34,6 @@ use revm_inspectors::tracing::{ js::{JsInspector, TransactionContext}, FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, }; -use std::sync::Arc; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `debug` API implementation. 
@@ -64,8 +62,13 @@ impl DebugApi { impl DebugApi where - Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider + 'static, - Eth: EthTransactions + 'static, + Provider: BlockReaderIdExt + + HeaderProvider + + ChainSpecProvider + + StateProviderFactory + + EvmEnvProvider + + 'static, + Eth: TraceExt + 'static, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -73,7 +76,7 @@ where } /// Trace the entire block asynchronously - async fn trace_block_with( + async fn trace_block( &self, at: BlockId, transactions: Vec, @@ -96,9 +99,13 @@ where let mut transactions = transactions.into_iter().enumerate().peekable(); while let Some((index, tx)) = transactions.next() { let tx_hash = tx.hash; - let tx = tx_env_with_recovered(&tx); + let env = EnvWithHandlerCfg { - env: Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx), + env: Env::boxed( + cfg.cfg_env.clone(), + block_env.clone(), + Call::evm_config(this.eth_api()).tx_env(&tx), + ), handler_cfg: cfg.handler_cfg, }; let (result, state_changes) = this.trace_transaction( @@ -164,7 +171,7 @@ where .collect::>>()? }; - self.trace_block_with(parent.into(), transactions, cfg, block_env, opts).await + self.trace_block(parent.into(), transactions, cfg, block_env, opts).await } /// Replays a block and returns the trace of each transaction. 
@@ -181,7 +188,7 @@ where let ((cfg, block_env, _), block) = futures::try_join!( self.inner.eth_api.evm_env_at(block_hash.into()), - self.inner.eth_api.block_by_id_with_senders(block_id), + self.inner.eth_api.block_with_senders(block_id), )?; let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; @@ -189,7 +196,7 @@ where // its parent block's state let state_at = block.parent_hash; - self.trace_block_with( + self.trace_block( state_at.into(), block.into_transactions_ecrecovered().collect(), cfg, @@ -237,7 +244,11 @@ where )?; let env = EnvWithHandlerCfg { - env: Env::boxed(cfg.cfg_env.clone(), block_env, tx_env_with_recovered(&tx)), + env: Env::boxed( + cfg.cfg_env.clone(), + block_env, + Call::evm_config(this.eth_api()).tx_env(&tx), + ), handler_cfg: cfg.handler_cfg, }; @@ -323,6 +334,10 @@ where self.inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', + // see + let db = db.0; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector @@ -345,6 +360,10 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', see + // + let db = db.0; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector.try_into_mux_frame(&res, db)?; @@ -363,6 +382,10 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', see + // + let db = db.0; + let mut inspector = JsInspector::new(code, config)?; let (res, _) = this.eth_api().inspect(&mut *db, env.clone(), &mut inspector)?; @@ -414,7 +437,7 @@ where let target_block = block_number.unwrap_or_default(); let ((cfg, mut block_env, _), block) = futures::try_join!( self.inner.eth_api.evm_env_at(target_block), - self.inner.eth_api.block_by_id_with_senders(target_block), + 
self.inner.eth_api.block_with_senders(target_block), )?; let opts = opts.unwrap_or_default(); @@ -438,6 +461,7 @@ where } let this = self.clone(); + self.inner .eth_api .spawn_with_state_at_block(at.into(), move |state| { @@ -452,9 +476,12 @@ where // Execute all transactions until index for tx in transactions { - let tx = tx_env_with_recovered(&tx); let env = EnvWithHandlerCfg { - env: Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx), + env: Env::boxed( + cfg.cfg_env.clone(), + block_env.clone(), + Call::evm_config(this.eth_api()).tx_env(&tx), + ), handler_cfg: cfg.handler_cfg, }; let (res, _) = this.inner.eth_api.transact(&mut db, env)?; @@ -518,7 +545,7 @@ where &self, opts: GethDebugTracingOptions, env: EnvWithHandlerCfg, - db: &mut CacheDB>, + db: &mut StateCacheDb<'_>, transaction_context: Option, ) -> EthResult<(GethTrace, revm_primitives::EvmState)> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; @@ -614,8 +641,13 @@ where #[async_trait] impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider + 'static, - Eth: EthApiSpec + 'static, + Provider: BlockReaderIdExt + + HeaderProvider + + ChainSpecProvider + + StateProviderFactory + + EvmEnvProvider + + 'static, + Eth: EthApiSpec + EthTransactions + TraceExt + 'static, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { @@ -642,17 +674,14 @@ where /// Handler for `debug_getRawBlock` async fn raw_block(&self, block_id: BlockId) -> RpcResult { - let block = self.inner.provider.block_by_id(block_id).to_rpc_result()?; - + let block = self + .inner + .provider + .block_by_id(block_id) + .to_rpc_result()? 
+ .ok_or_else(|| EthApiError::UnknownBlockNumber)?; let mut res = Vec::new(); - if let Some(mut block) = block { - // In RPC withdrawals are always present - if block.withdrawals.is_none() { - block.withdrawals = Some(Withdrawals::default()); - } - block.encode(&mut res); - } - + block.encode(&mut res); Ok(res.into()) } diff --git a/crates/rpc/rpc/src/eth/api/optimism.rs b/crates/rpc/rpc/src/eth/api/optimism.rs deleted file mode 100644 index af58450145b8..000000000000 --- a/crates/rpc/rpc/src/eth/api/optimism.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Optimism helpers. - -use revm::L1BlockInfo; - -/// Optimism Transaction Metadata -/// -/// Includes the L1 fee and data gas for the tx along with the L1 -/// block info. In order to pass the [`OptimismTxMeta`] into the -/// async colored `build_transaction_receipt_with_block_receipts` -/// function, a reference counter for the L1 block info is -/// used so the L1 block info can be shared between receipts. -#[derive(Debug, Default, Clone)] -pub(crate) struct OptimismTxMeta { - /// The L1 block info. - pub(crate) l1_block_info: Option, - /// The L1 fee for the block. - pub(crate) l1_fee: Option, - /// The L1 data gas for the block. - pub(crate) l1_data_gas: Option, -} - -impl OptimismTxMeta { - /// Creates a new [`OptimismTxMeta`]. - pub(crate) const fn new( - l1_block_info: Option, - l1_fee: Option, - l1_data_gas: Option, - ) -> Self { - Self { l1_block_info, l1_fee, l1_data_gas } - } -} diff --git a/crates/rpc/rpc/src/eth/api/sign.rs b/crates/rpc/rpc/src/eth/api/sign.rs deleted file mode 100644 index 5cbdefa41c9e..000000000000 --- a/crates/rpc/rpc/src/eth/api/sign.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! 
Contains RPC handler implementations specific to sign endpoints - -use crate::{ - eth::{ - error::{EthResult, SignError}, - signer::{DevSigner, EthSigner}, - }, - EthApi, -}; -use alloy_dyn_abi::TypedData; -use reth_primitives::{Address, Bytes}; -use serde_json::Value; - -impl EthApi { - pub(crate) async fn sign(&self, account: Address, message: Bytes) -> EthResult { - Ok(self.find_signer(&account)?.sign(account, &message).await?.to_hex_bytes()) - } - - pub(crate) fn sign_typed_data(&self, data: Value, account: Address) -> EthResult { - Ok(self - .find_signer(&account)? - .sign_typed_data( - account, - &serde_json::from_value::(data) - .map_err(|_| SignError::InvalidTypedData)?, - )? - .to_hex_bytes()) - } - - pub(crate) fn find_signer( - &self, - account: &Address, - ) -> Result, SignError> { - self.inner - .signers - .read() - .iter() - .find(|signer| signer.is_signer_for(account)) - .map(|signer| dyn_clone::clone_box(&**signer)) - .ok_or(SignError::NoAccount) - } - - /// Generates 20 random developer accounts. - /// Used in DEV mode. - pub fn with_dev_accounts(&self) { - let mut signers = self.inner.signers.write(); - *signers = DevSigner::random_signers(20); - } -} diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index db3537ad2b39..919c0c36f598 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -1,28 +1,31 @@ //! `Eth` bundle implementation and helpers. 
-use crate::eth::{ - error::{EthApiError, EthResult, RpcInvalidTransactionError}, - revm_utils::FillableTransaction, - utils::recover_raw_transaction, - EthTransactions, -}; +use std::sync::Arc; + use jsonrpsee::core::RpcResult; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, keccak256, revm_primitives::db::{DatabaseCommit, DatabaseRef}, PooledTransactionsElement, U256, }; use reth_revm::database::StateProviderDatabase; -use reth_rpc_api::EthCallBundleApiServer; -use reth_rpc_types::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; +use reth_rpc_types::mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ db::CacheDB, primitives::{ResultAndState, TxEnv}, }; -use revm_primitives::{EnvWithHandlerCfg, MAX_BLOB_GAS_PER_BLOCK}; -use std::sync::Arc; +use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId, MAX_BLOB_GAS_PER_BLOCK}; + +use reth_provider::{ChainSpecProvider, HeaderProvider}; +use reth_rpc_eth_api::{ + helpers::{Call, EthTransactions, LoadPendingBlock}, + EthCallBundleApiServer, +}; +use reth_rpc_eth_types::{ + utils::recover_raw_transaction, EthApiError, EthResult, RpcInvalidTransactionError, +}; /// `Eth` bundle implementation. pub struct EthBundle { @@ -39,14 +42,22 @@ impl EthBundle { impl EthBundle where - Eth: EthTransactions + 'static, + Eth: EthTransactions + LoadPendingBlock + Call + 'static, { /// Simulates a bundle of transactions at the top of a given block number with the state of /// another (or the same) block. This can be used to simulate future blocks with the current /// state, or it can be used to simulate a past block. 
The sender is responsible for signing the /// transactions and using the correct nonce and ensuring validity pub async fn call_bundle(&self, bundle: EthCallBundle) -> EthResult { - let EthCallBundle { txs, block_number, state_block_number, timestamp } = bundle; + let EthCallBundle { + txs, + block_number, + state_block_number, + timestamp, + gas_limit, + difficulty, + base_fee, + } = bundle; if txs.is_empty() { return Err(EthApiError::InvalidParams( EthBundleError::EmptyBundleTransactions.to_string(), @@ -86,6 +97,7 @@ where } let block_id: reth_rpc_types::BlockId = state_block_number.into(); + // Note: the block number is considered the `parent` block: let (cfg, mut block_env, at) = self.inner.eth_api.evm_env_at(block_id).await?; // need to adjust the timestamp for the next block @@ -96,10 +108,37 @@ where block_env.timestamp += U256::from(2); } + if let Some(difficulty) = difficulty { + block_env.difficulty = U256::from(difficulty); + } + + if let Some(gas_limit) = gas_limit { + block_env.gas_limit = U256::from(gas_limit); + } + + if let Some(base_fee) = base_fee { + block_env.basefee = U256::from(base_fee); + } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { + let parent_block = block_env.number.saturating_to::(); + // here we need to fetch the _next_ block's basefee based on the parent block + let parent = LoadPendingBlock::provider(&self.inner.eth_api) + .header_by_number(parent_block)? 
+ .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + if let Some(base_fee) = parent.next_block_base_fee( + LoadPendingBlock::provider(&self.inner.eth_api) + .chain_spec() + .base_fee_params_at_block(parent_block), + ) { + block_env.basefee = U256::from(base_fee); + } + } + let state_block_number = block_env.number; // use the block number of the request block_env.number = U256::from(block_number); + let eth_api = self.inner.eth_api.clone(); + self.inner .eth_api .spawn_with_state_at_block(at, move |state| { @@ -117,8 +156,7 @@ where let mut total_gas_fess = U256::ZERO; let mut hash_bytes = Vec::with_capacity(32 * transactions.len()); - let mut evm = - revm::Evm::builder().with_db(db).with_env_with_handler_cfg(env).build(); + let mut evm = Call::evm_config(ð_api).evm_with_env(db, env); let mut results = Vec::with_capacity(transactions.len()); let mut transactions = transactions.into_iter().peekable(); @@ -127,17 +165,17 @@ where // Verify that the given blob data, commitments, and proofs are all valid for // this transaction. 
if let PooledTransactionsElement::BlobTransaction(ref tx) = tx { - tx.validate(MAINNET_KZG_TRUSTED_SETUP.as_ref()) + tx.validate(EnvKzgSettings::Default.get()) .map_err(|e| EthApiError::InvalidParams(e.to_string()))?; } - let tx = tx.into_ecrecovered_transaction(signer); + let tx = tx.into_transaction(); hash_bytes.extend_from_slice(tx.hash().as_slice()); let gas_price = tx .effective_tip_per_gas(basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow)?; - tx.try_fill_tx_env(evm.tx_mut())?; + Call::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); let ResultAndState { result, state } = evm.transact()?; let gas_used = result.gas_used(); @@ -168,7 +206,7 @@ where let tx_res = EthCallBundleTransactionResult { coinbase_diff, eth_sent_to_coinbase, - from_address: tx.signer(), + from_address: signer, gas_fees, gas_price: U256::from(gas_price), gas_used, @@ -214,7 +252,7 @@ where #[async_trait::async_trait] impl EthCallBundleApiServer for EthBundle where - Eth: EthTransactions + 'static, + Eth: EthTransactions + LoadPendingBlock + Call + 'static, { async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { Ok(Self::call_bundle(self, request).await?) diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs new file mode 100644 index 000000000000..36030741ff3e --- /dev/null +++ b/crates/rpc/rpc/src/eth/core.rs @@ -0,0 +1,611 @@ +//! Implementation of the [`jsonrpsee`] generated [`EthApiServer`](crate::EthApi) trait +//! Handles RPC requests for the `eth_` namespace. 
+ +use futures::Future; +use std::sync::Arc; + +use derive_more::Deref; +use reth_primitives::{BlockNumberOrTag, U256}; +use reth_provider::BlockReaderIdExt; +use reth_rpc_eth_api::{ + helpers::{transaction::UpdateRawTxForwarder, EthSigner, SpawnBlocking}, + RawTransactionForwarder, +}; +use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock}; +use reth_tasks::{ + pool::{BlockingTaskGuard, BlockingTaskPool}, + TaskSpawner, TokioTaskExecutor, +}; +use tokio::sync::{AcquireError, Mutex, OwnedSemaphorePermit}; + +use crate::eth::DevSigner; + +/// `Eth` API implementation. +/// +/// This type provides the functionality for handling `eth_` related requests. +/// These are implemented two-fold: Core functionality is implemented as +/// [`EthApiSpec`](reth_rpc_eth_api::helpers::EthApiSpec) trait. Additionally, the required server +/// implementations (e.g. [`EthApiServer`](reth_rpc_eth_api::EthApiServer)) are implemented +/// separately in submodules. The rpc handler implementation can then delegate to the main impls. +/// This way [`EthApi`] is not limited to [`jsonrpsee`] and can be used standalone or in other +/// network handlers (for example ipc). +#[derive(Deref)] +pub struct EthApi { + /// All nested fields bundled together. + pub(super) inner: Arc>, +} + +impl EthApi +where + Provider: BlockReaderIdExt, +{ + /// Creates a new, shareable instance using the default tokio task spawner. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + provider: Provider, + pool: Pool, + network: Network, + eth_cache: EthStateCache, + gas_oracle: GasPriceOracle, + gas_cap: impl Into, + eth_proof_window: u64, + blocking_task_pool: BlockingTaskPool, + fee_history_cache: FeeHistoryCache, + evm_config: EvmConfig, + raw_transaction_forwarder: Option>, + proof_permits: usize, + ) -> Self { + Self::with_spawner( + provider, + pool, + network, + eth_cache, + gas_oracle, + gas_cap.into().into(), + eth_proof_window, + Box::::default(), + blocking_task_pool, + fee_history_cache, + evm_config, + raw_transaction_forwarder, + proof_permits, + ) + } + + /// Creates a new, shareable instance. + #[allow(clippy::too_many_arguments)] + pub fn with_spawner( + provider: Provider, + pool: Pool, + network: Network, + eth_cache: EthStateCache, + gas_oracle: GasPriceOracle, + gas_cap: u64, + eth_proof_window: u64, + task_spawner: Box, + blocking_task_pool: BlockingTaskPool, + fee_history_cache: FeeHistoryCache, + evm_config: EvmConfig, + raw_transaction_forwarder: Option>, + proof_permits: usize, + ) -> Self { + // get the block number of the latest block + let latest_block = provider + .header_by_number_or_tag(BlockNumberOrTag::Latest) + .ok() + .flatten() + .map(|header| header.number) + .unwrap_or_default(); + + let inner = EthApiInner { + provider, + pool, + network, + signers: parking_lot::RwLock::new(Default::default()), + eth_cache, + gas_oracle, + gas_cap, + eth_proof_window, + starting_block: U256::from(latest_block), + task_spawner, + pending_block: Default::default(), + blocking_task_pool, + fee_history_cache, + evm_config, + raw_transaction_forwarder: parking_lot::RwLock::new(raw_transaction_forwarder), + blocking_task_guard: BlockingTaskGuard::new(proof_permits), + }; + + Self { inner: Arc::new(inner) } + } +} + +impl std::fmt::Debug + for EthApi +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("EthApi").finish_non_exhaustive() + 
} +} + +impl Clone for EthApi { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +impl SpawnBlocking + for EthApi +where + Self: Clone + Send + Sync + 'static, +{ + #[inline] + fn io_task_spawner(&self) -> impl reth_tasks::TaskSpawner { + self.inner.task_spawner() + } + + #[inline] + fn tracing_task_pool(&self) -> &reth_tasks::pool::BlockingTaskPool { + self.inner.blocking_task_pool() + } + + fn acquire_owned( + &self, + ) -> impl Future> + Send { + self.blocking_task_guard.clone().acquire_owned() + } + + fn acquire_many_owned( + &self, + n: u32, + ) -> impl Future> + Send { + self.blocking_task_guard.clone().acquire_many_owned(n) + } +} + +impl EthApi { + /// Generates 20 random developer accounts. + /// Used in DEV mode. + pub fn with_dev_accounts(&self) { + let mut signers = self.inner.signers.write(); + *signers = DevSigner::random_signers(20); + } +} + +/// Container type `EthApi` +#[allow(missing_debug_implementations)] +pub struct EthApiInner { + /// The transaction pool. + pool: Pool, + /// The provider that can interact with the chain. + provider: Provider, + /// An interface to interact with the network + network: Network, + /// All configured Signers + signers: parking_lot::RwLock>>, + /// The async cache frontend for eth related data + eth_cache: EthStateCache, + /// The async gas oracle frontend for gas price suggestions + gas_oracle: GasPriceOracle, + /// Maximum gas limit for `eth_call` and call tracing RPC methods. + gas_cap: u64, + /// The maximum number of blocks into the past for generating state proofs. + eth_proof_window: u64, + /// The block number at which the node started + starting_block: U256, + /// The type that can spawn tasks which would otherwise block. + task_spawner: Box, + /// Cached pending block if any + pending_block: Mutex>, + /// A pool dedicated to CPU heavy blocking tasks. 
+ blocking_task_pool: BlockingTaskPool, + /// Cache for block fees history + fee_history_cache: FeeHistoryCache, + /// The type that defines how to configure the EVM + evm_config: EvmConfig, + /// Allows forwarding received raw transactions + raw_transaction_forwarder: parking_lot::RwLock>>, + /// Guard for getproof calls + blocking_task_guard: BlockingTaskGuard, +} + +impl EthApiInner { + /// Returns a handle to data on disk. + #[inline] + pub const fn provider(&self) -> &Provider { + &self.provider + } + + /// Returns a handle to data in memory. + #[inline] + pub const fn cache(&self) -> &EthStateCache { + &self.eth_cache + } + + /// Returns a handle to the pending block. + #[inline] + pub const fn pending_block(&self) -> &Mutex> { + &self.pending_block + } + + /// Returns a handle to the task spawner. + #[inline] + pub const fn task_spawner(&self) -> &dyn TaskSpawner { + &*self.task_spawner + } + + /// Returns a handle to the blocking thread pool. + #[inline] + pub const fn blocking_task_pool(&self) -> &BlockingTaskPool { + &self.blocking_task_pool + } + + /// Returns a handle to the EVM config. + #[inline] + pub const fn evm_config(&self) -> &EvmConfig { + &self.evm_config + } + + /// Returns a handle to the transaction pool. + #[inline] + pub const fn pool(&self) -> &Pool { + &self.pool + } + + /// Returns a handle to the transaction forwarder. + #[inline] + pub fn raw_tx_forwarder(&self) -> Option> { + self.raw_transaction_forwarder.read().clone() + } + + /// Returns the gas cap. + #[inline] + pub const fn gas_cap(&self) -> u64 { + self.gas_cap + } + + /// Returns a handle to the gas oracle. + #[inline] + pub const fn gas_oracle(&self) -> &GasPriceOracle { + &self.gas_oracle + } + + /// Returns a handle to the fee history cache. + #[inline] + pub const fn fee_history_cache(&self) -> &FeeHistoryCache { + &self.fee_history_cache + } + + /// Returns a handle to the signers. 
+ #[inline] + pub const fn signers(&self) -> &parking_lot::RwLock>> { + &self.signers + } + + /// Returns the starting block. + #[inline] + pub const fn starting_block(&self) -> U256 { + self.starting_block + } + + /// Returns the inner `Network` + #[inline] + pub const fn network(&self) -> &Network { + &self.network + } + + /// The maximum number of blocks into the past for generating state proofs. + #[inline] + pub const fn eth_proof_window(&self) -> u64 { + self.eth_proof_window + } +} + +impl UpdateRawTxForwarder + for EthApiInner +{ + fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc) { + self.raw_transaction_forwarder.write().replace(forwarder); + } +} + +#[cfg(test)] +mod tests { + use jsonrpsee_types::error::INVALID_PARAMS_CODE; + use reth_chainspec::BaseFeeParams; + use reth_evm_ethereum::EthEvmConfig; + use reth_network_api::noop::NoopNetwork; + use reth_primitives::{ + constants::ETHEREUM_BLOCK_GAS_LIMIT, Block, BlockNumberOrTag, Header, TransactionSigned, + B256, U64, + }; + use reth_provider::{ + test_utils::{MockEthProvider, NoopProvider}, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, + }; + use reth_rpc_eth_api::EthApiServer; + use reth_rpc_eth_types::{ + EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + }; + use reth_rpc_server_types::constants::{DEFAULT_ETH_PROOF_WINDOW, DEFAULT_PROOF_PERMITS}; + use reth_rpc_types::FeeHistory; + use reth_tasks::pool::BlockingTaskPool; + use reth_testing_utils::{generators, generators::Rng}; + use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + + use crate::EthApi; + + fn build_test_eth_api< + P: BlockReaderIdExt + + BlockReader + + ChainSpecProvider + + EvmEnvProvider + + StateProviderFactory + + Unpin + + Clone + + 'static, + >( + provider: P, + ) -> EthApi { + let evm_config = EthEvmConfig::default(); + let cache = EthStateCache::spawn(provider.clone(), Default::default(), evm_config); + let fee_history_cache = + 
FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); + + EthApi::new( + provider.clone(), + testing_pool(), + NoopNetwork::default(), + cache.clone(), + GasPriceOracle::new(provider, Default::default(), cache), + ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_ETH_PROOF_WINDOW, + BlockingTaskPool::build().expect("failed to build tracing pool"), + fee_history_cache, + evm_config, + None, + DEFAULT_PROOF_PERMITS, + ) + } + + // Function to prepare the EthApi with mock data + fn prepare_eth_api( + newest_block: u64, + mut oldest_block: Option, + block_count: u64, + mock_provider: MockEthProvider, + ) -> (EthApi, Vec, Vec) { + let mut rng = generators::rng(); + + // Build mock data + let mut gas_used_ratios = Vec::new(); + let mut base_fees_per_gas = Vec::new(); + let mut last_header = None; + let mut parent_hash = B256::default(); + + for i in (0..block_count).rev() { + let hash = rng.gen(); + let gas_limit: u64 = rng.gen(); + let gas_used: u64 = rng.gen(); + // Note: Generates a u32 to avoid overflows later + let base_fee_per_gas: Option = rng.gen::().then(|| rng.gen::() as u64); + + let header = Header { + number: newest_block - i, + gas_limit, + gas_used, + base_fee_per_gas, + parent_hash, + ..Default::default() + }; + last_header = Some(header.clone()); + parent_hash = hash; + + let mut transactions = vec![]; + for _ in 0..100 { + let random_fee: u128 = rng.gen(); + + if let Some(base_fee_per_gas) = header.base_fee_per_gas { + let transaction = TransactionSigned { + transaction: reth_primitives::Transaction::Eip1559( + reth_primitives::TxEip1559 { + max_priority_fee_per_gas: random_fee, + max_fee_per_gas: random_fee + base_fee_per_gas as u128, + ..Default::default() + }, + ), + ..Default::default() + }; + + transactions.push(transaction); + } else { + let transaction = TransactionSigned { + transaction: reth_primitives::Transaction::Legacy(Default::default()), + ..Default::default() + }; + + transactions.push(transaction); + } + } + + 
mock_provider.add_block( + hash, + Block { header: header.clone(), body: transactions, ..Default::default() }, + ); + mock_provider.add_header(hash, header); + + oldest_block.get_or_insert(hash); + gas_used_ratios.push(gas_used as f64 / gas_limit as f64); + base_fees_per_gas.push(base_fee_per_gas.map(|fee| fee as u128).unwrap_or_default()); + } + + // Add final base fee (for the next block outside of the request) + let last_header = last_header.unwrap(); + base_fees_per_gas.push(BaseFeeParams::ethereum().next_block_base_fee( + last_header.gas_used as u128, + last_header.gas_limit as u128, + last_header.base_fee_per_gas.unwrap_or_default() as u128, + )); + + let eth_api = build_test_eth_api(mock_provider); + + (eth_api, base_fees_per_gas, gas_used_ratios) + } + + /// Invalid block range + #[tokio::test] + async fn test_fee_history_empty() { + let response = as EthApiServer>::fee_history( + &build_test_eth_api(NoopProvider::default()), + U64::from(1), + BlockNumberOrTag::Latest, + None, + ) + .await; + assert!(response.is_err()); + let error_object = response.unwrap_err(); + assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Invalid block range (request is before genesis) + async fn test_fee_history_invalid_block_range_before_genesis() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let response = as EthApiServer>::fee_history( + ð_api, + U64::from(newest_block + 1), + newest_block.into(), + Some(vec![10.0]), + ) + .await; + + assert!(response.is_err()); + let error_object = response.unwrap_err(); + assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Invalid block range (request is in the future) + async fn test_fee_history_invalid_block_range_in_future() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) 
= + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let response = as EthApiServer>::fee_history( + ð_api, + U64::from(1), + (newest_block + 1000).into(), + Some(vec![10.0]), + ) + .await; + + assert!(response.is_err()); + let error_object = response.unwrap_err(); + assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Requesting no block should result in a default response + async fn test_fee_history_no_block_requested() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let response = as EthApiServer>::fee_history( + ð_api, + U64::from(0), + newest_block.into(), + None, + ) + .await + .unwrap(); + assert_eq!( + response, + FeeHistory::default(), + "none: requesting no block should yield a default response" + ); + } + + #[tokio::test] + /// Requesting a single block should return 1 block (+ base fee for the next block over) + async fn test_fee_history_single_block() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, base_fees_per_gas, gas_used_ratios) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let fee_history = + eth_api.fee_history(U64::from(1), newest_block.into(), None).await.unwrap(); + assert_eq!( + fee_history.base_fee_per_gas, + &base_fees_per_gas[base_fees_per_gas.len() - 2..], + "one: base fee per gas is incorrect" + ); + assert_eq!( + fee_history.base_fee_per_gas.len(), + 2, + "one: should return base fee of the next block as well" + ); + assert_eq!( + &fee_history.gas_used_ratio, + &gas_used_ratios[gas_used_ratios.len() - 1..], + "one: gas used ratio is incorrect" + ); + assert_eq!(fee_history.oldest_block, newest_block, "one: oldest block is incorrect"); + assert!( + fee_history.reward.is_none(), + "one: no percentiles were requested, so 
there should be no rewards result" + ); + } + + /// Requesting all blocks should be ok + #[tokio::test] + async fn test_fee_history_all_blocks() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, base_fees_per_gas, gas_used_ratios) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let fee_history = + eth_api.fee_history(U64::from(block_count), newest_block.into(), None).await.unwrap(); + + assert_eq!( + &fee_history.base_fee_per_gas, &base_fees_per_gas, + "all: base fee per gas is incorrect" + ); + assert_eq!( + fee_history.base_fee_per_gas.len() as u64, + block_count + 1, + "all: should return base fee of the next block as well" + ); + assert_eq!( + &fee_history.gas_used_ratio, &gas_used_ratios, + "all: gas used ratio is incorrect" + ); + assert_eq!( + fee_history.oldest_block, + newest_block - block_count + 1, + "all: oldest block is incorrect" + ); + assert!( + fee_history.reward.is_none(), + "all: no percentiles were requested, so there should be no rewards result" + ); + } +} diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 7c76b3d91868..734749e50eb1 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,33 +1,31 @@ -use super::cache::EthStateCache; -use crate::{ - eth::{ - error::EthApiError, - logs_utils::{self, append_matching_block_logs}, - }, - result::{rpc_error_with_code, ToRpcResult}, - EthSubscriptionIdProvider, +//! 
`eth_` `Filter` RPC handler implementation + +use std::{ + collections::HashMap, + fmt, + iter::StepBy, + ops::RangeInclusive, + sync::Arc, + time::{Duration, Instant}, }; -use core::fmt; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; -use reth_primitives::{ChainInfo, IntoRecoveredTransaction, TxHash}; +use reth_chainspec::ChainInfo; +use reth_primitives::{IntoRecoveredTransaction, TxHash}; use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; -use reth_rpc_api::EthFilterApiServer; +use reth_rpc_eth_api::EthFilterApiServer; +use reth_rpc_eth_types::{ + logs_utils::{self, append_matching_block_logs}, + EthApiError, EthFilterError, EthStateCache, EthSubscriptionIdProvider, +}; +use reth_rpc_server_types::ToRpcResult; use reth_rpc_types::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log, PendingTransactionFilterKind, }; - use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; -use std::{ - collections::HashMap, - iter::StepBy, - ops::RangeInclusive, - sync::Arc, - time::{Duration, Instant}, -}; use tokio::{ sync::{mpsc::Receiver, Mutex}, time::MissedTickBehavior, @@ -131,7 +129,7 @@ where ::Transaction: 'static, { /// Returns all the filter changes for the given id, if any - pub async fn filter_changes(&self, id: FilterId) -> Result { + pub async fn filter_changes(&self, id: FilterId) -> Result { let info = self.inner.provider.chain_info()?; let best_number = info.best_number; @@ -139,7 +137,7 @@ where // the last time changes were polled, in other words the best block at last poll + 1 let (start_block, kind) = { let mut filters = self.inner.active_filters.inner.lock().await; - let filter = filters.get_mut(&id).ok_or(FilterError::FilterNotFound(id))?; + let filter = filters.get_mut(&id).ok_or(EthFilterError::FilterNotFound(id))?; if filter.block > best_number { // no new blocks since the last poll @@ 
-203,16 +201,16 @@ where /// Returns an error if no matching log filter exists. /// /// Handler for `eth_getFilterLogs` - pub async fn filter_logs(&self, id: FilterId) -> Result, FilterError> { + pub async fn filter_logs(&self, id: FilterId) -> Result, EthFilterError> { let filter = { let filters = self.inner.active_filters.inner.lock().await; if let FilterKind::Log(ref filter) = - filters.get(&id).ok_or_else(|| FilterError::FilterNotFound(id.clone()))?.kind + filters.get(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?.kind { *filter.clone() } else { // Not a log filter - return Err(FilterError::FilterNotFound(id)); + return Err(EthFilterError::FilterNotFound(id)) } }; @@ -346,7 +344,7 @@ where Pool: TransactionPool + 'static, { /// Returns logs matching given filter object. - async fn logs_for_filter(&self, filter: Filter) -> Result, FilterError> { + async fn logs_for_filter(&self, filter: Filter) -> Result, EthFilterError> { match filter.block_option { FilterBlockOption::AtBlockHash(block_hash) => { // for all matching logs in the block @@ -427,16 +425,16 @@ where from_block: u64, to_block: u64, chain_info: ChainInfo, - ) -> Result, FilterError> { + ) -> Result, EthFilterError> { trace!(target: "rpc::eth::filter", from=from_block, to=to_block, ?filter, "finding logs in range"); let best_number = chain_info.best_number; if to_block < from_block { - return Err(FilterError::InvalidBlockRangeParams); + return Err(EthFilterError::InvalidBlockRangeParams) } if to_block - from_block > self.max_blocks_per_filter { - return Err(FilterError::QueryExceedsMaxBlocks(self.max_blocks_per_filter)); + return Err(EthFilterError::QueryExceedsMaxBlocks(self.max_blocks_per_filter)) } let mut all_logs = Vec::new(); @@ -504,7 +502,7 @@ where // logs of a single block let is_multi_block_range = from_block != to_block; if is_multi_block_range && all_logs.len() > self.max_logs_per_response { - return Err(FilterError::QueryExceedsMaxResults( + return 
Err(EthFilterError::QueryExceedsMaxResults( self.max_logs_per_response, )); } @@ -681,51 +679,6 @@ enum FilterKind { PendingTransaction(PendingTransactionKind), } -/// Errors that can occur in the handler implementation -#[derive(Debug, thiserror::Error)] -pub enum FilterError { - #[error("filter not found")] - FilterNotFound(FilterId), - #[error("invalid block range params")] - InvalidBlockRangeParams, - #[error("query exceeds max block range {0}")] - QueryExceedsMaxBlocks(u64), - #[error("query exceeds max results {0}")] - QueryExceedsMaxResults(usize), - #[error(transparent)] - EthAPIError(#[from] EthApiError), - /// Error thrown when a spawned task failed to deliver a response. - #[error("internal filter error")] - InternalError, -} - -// convert the error -impl From for jsonrpsee::types::error::ErrorObject<'static> { - fn from(err: FilterError) -> Self { - match err { - FilterError::FilterNotFound(_) => rpc_error_with_code( - jsonrpsee::types::error::INVALID_PARAMS_CODE, - "filter not found", - ), - err @ FilterError::InternalError => { - rpc_error_with_code(jsonrpsee::types::error::INTERNAL_ERROR_CODE, err.to_string()) - } - FilterError::EthAPIError(err) => err.into(), - err @ FilterError::InvalidBlockRangeParams | - err @ FilterError::QueryExceedsMaxBlocks(_) | - err @ FilterError::QueryExceedsMaxResults(_) => { - rpc_error_with_code(jsonrpsee::types::error::INVALID_PARAMS_CODE, err.to_string()) - } - } - } -} - -impl From for FilterError { - fn from(err: ProviderError) -> Self { - Self::EthAPIError(err.into()) - } -} - /// An iterator that yields _inclusive_ block ranges of a given step size #[derive(Debug)] struct BlockRangeInclusiveIter { diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs new file mode 100644 index 000000000000..2ce6c7ed2e93 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -0,0 +1,34 @@ +//! Contains RPC handler implementations specific to blocks. 
+ +use reth_provider::{BlockReaderIdExt, HeaderProvider}; +use reth_rpc_eth_api::helpers::{EthBlocks, LoadBlock, LoadPendingBlock, SpawnBlocking}; +use reth_rpc_eth_types::EthStateCache; + +use crate::EthApi; + +impl EthBlocks for EthApi +where + Self: LoadBlock, + Provider: HeaderProvider, +{ + #[inline] + fn provider(&self) -> impl reth_provider::HeaderProvider { + self.inner.provider() + } +} + +impl LoadBlock for EthApi +where + Self: LoadPendingBlock + SpawnBlocking, + Provider: BlockReaderIdExt, +{ + #[inline] + fn provider(&self) -> impl BlockReaderIdExt { + self.inner.provider() + } + + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs new file mode 100644 index 000000000000..c442c46b4b80 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -0,0 +1,27 @@ +//! Contains RPC handler implementations specific to endpoints that call/execute within evm. + +use reth_evm::ConfigureEvm; +use reth_rpc_eth_api::helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}; + +use crate::EthApi; + +impl EthCall for EthApi where + Self: Call + LoadPendingBlock +{ +} + +impl Call for EthApi +where + Self: LoadState + SpawnBlocking, + EvmConfig: ConfigureEvm, +{ + #[inline] + fn call_gas_limit(&self) -> u64 { + self.inner.gas_cap() + } + + #[inline] + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs new file mode 100644 index 000000000000..7380f4ea2c20 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -0,0 +1,39 @@ +//! Contains RPC handler implementations for fee history. 
+ +use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; + +use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; +use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; + +use crate::EthApi; + +impl EthFees for EthApi where + Self: LoadFee +{ +} + +impl LoadFee for EthApi +where + Self: LoadBlock, + Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider, +{ + #[inline] + fn provider(&self) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { + self.inner.provider() + } + + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } + + #[inline] + fn gas_oracle(&self) -> &GasPriceOracle { + self.inner.gas_oracle() + } + + #[inline] + fn fee_history_cache(&self) -> &FeeHistoryCache { + self.inner.fee_history_cache() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/mod.rs b/crates/rpc/rpc/src/eth/helpers/mod.rs new file mode 100644 index 000000000000..4c86e2b5fa18 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/mod.rs @@ -0,0 +1,18 @@ +//! The entire implementation of the namespace is quite large, hence it is divided across several +//! files. + +pub mod signer; + +mod block; +mod call; +mod fees; +#[cfg(feature = "optimism")] +pub mod optimism; +#[cfg(not(feature = "optimism"))] +mod pending_block; +#[cfg(not(feature = "optimism"))] +mod receipt; +mod spec; +mod state; +mod trace; +mod transaction; diff --git a/crates/rpc/rpc/src/eth/helpers/optimism.rs b/crates/rpc/rpc/src/eth/helpers/optimism.rs new file mode 100644 index 000000000000..751c06463a00 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/optimism.rs @@ -0,0 +1,231 @@ +//! Loads and formats OP transaction RPC response. 
+ +use jsonrpsee_types::error::ErrorObject; +use reth_evm::ConfigureEvm; +use reth_evm_optimism::RethL1BlockInfo; +use reth_primitives::{ + BlockNumber, Receipt, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, B256, +}; +use reth_provider::{ + BlockIdReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, + StateProviderFactory, +}; +use reth_rpc_types::{AnyTransactionReceipt, OptimismTransactionReceiptFields, ToRpcError}; +use reth_transaction_pool::TransactionPool; +use revm::L1BlockInfo; +use revm_primitives::{BlockEnv, ExecutionResult}; + +use reth_rpc_eth_api::helpers::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; +use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, PendingBlock, ReceiptBuilder}; +use reth_rpc_server_types::result::internal_rpc_err; + +use crate::EthApi; + +/// L1 fee and data gas for a transaction, along with the L1 block info. +#[derive(Debug, Default, Clone)] +pub struct OptimismTxMeta { + /// The L1 block info. + pub l1_block_info: Option, + /// The L1 fee for the block. + pub l1_fee: Option, + /// The L1 data gas for the block. + pub l1_data_gas: Option, +} + +impl OptimismTxMeta { + /// Creates a new [`OptimismTxMeta`]. + pub const fn new( + l1_block_info: Option, + l1_fee: Option, + l1_data_gas: Option, + ) -> Self { + Self { l1_block_info, l1_fee, l1_data_gas } + } +} + +impl EthApi +where + Provider: BlockIdReader + ChainSpecProvider, +{ + /// Builds [`OptimismTxMeta`] object using the provided [`TransactionSigned`], L1 block + /// info and block timestamp. The [`L1BlockInfo`] is used to calculate the l1 fee and l1 data + /// gas for the transaction. If the [`L1BlockInfo`] is not provided, the meta info will be + /// empty. 
+ pub fn build_op_tx_meta( + &self, + tx: &TransactionSigned, + l1_block_info: Option, + block_timestamp: u64, + ) -> EthResult { + let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; + + let (l1_fee, l1_data_gas) = if !tx.is_deposit() { + let envelope_buf = tx.envelope_encoded(); + + let inner_l1_fee = l1_block_info + .l1_tx_data_fee( + &self.inner.provider().chain_spec(), + block_timestamp, + &envelope_buf, + tx.is_deposit(), + ) + .map_err(|_| OptimismEthApiError::L1BlockFeeError)?; + let inner_l1_data_gas = l1_block_info + .l1_data_gas(&self.inner.provider().chain_spec(), block_timestamp, &envelope_buf) + .map_err(|_| OptimismEthApiError::L1BlockGasError)?; + ( + Some(inner_l1_fee.saturating_to::()), + Some(inner_l1_data_gas.saturating_to::()), + ) + } else { + (None, None) + }; + + Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas)) + } +} + +impl LoadReceipt for EthApi +where + Self: Send + Sync, + Provider: BlockIdReader + ChainSpecProvider, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } + + async fn build_transaction_receipt( + &self, + tx: TransactionSigned, + meta: TransactionMeta, + receipt: Receipt, + ) -> EthResult { + let (block, receipts) = self + .cache() + .get_block_and_receipts(meta.block_hash) + .await? + .ok_or(EthApiError::UnknownBlockNumber)?; + + let block = block.unseal(); + let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); + let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; + + let resp_builder = ReceiptBuilder::new(&tx, meta, &receipt, &receipts)?; + let resp_builder = op_receipt_fields(resp_builder, &tx, &receipt, optimism_tx_meta); + + Ok(resp_builder.build()) + } +} + +/// Applies OP specific fields to a receipt. 
+fn op_receipt_fields( + resp_builder: ReceiptBuilder, + tx: &TransactionSigned, + receipt: &Receipt, + optimism_tx_meta: OptimismTxMeta, +) -> ReceiptBuilder { + let mut op_fields = OptimismTransactionReceiptFields::default(); + + if tx.is_deposit() { + op_fields.deposit_nonce = receipt.deposit_nonce.map(reth_primitives::U64::from); + op_fields.deposit_receipt_version = + receipt.deposit_receipt_version.map(reth_primitives::U64::from); + } else if let Some(l1_block_info) = optimism_tx_meta.l1_block_info { + op_fields.l1_fee = optimism_tx_meta.l1_fee; + op_fields.l1_gas_used = optimism_tx_meta.l1_data_gas.map(|dg| { + dg + l1_block_info.l1_fee_overhead.unwrap_or_default().saturating_to::() + }); + op_fields.l1_fee_scalar = Some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0); + op_fields.l1_gas_price = Some(l1_block_info.l1_base_fee.saturating_to()); + } + + resp_builder.add_other_fields(op_fields.into()) +} + +impl LoadPendingBlock + for EthApi +where + Self: SpawnBlocking, + Provider: BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, + Pool: TransactionPool, + EvmConfig: ConfigureEvm, +{ + #[inline] + fn provider( + &self, + ) -> impl BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory { + self.inner.provider() + } + + #[inline] + fn pool(&self) -> impl reth_transaction_pool::TransactionPool { + self.inner.pool() + } + + #[inline] + fn pending_block(&self) -> &tokio::sync::Mutex> { + self.inner.pending_block() + } + + #[inline] + fn evm_config(&self) -> &impl reth_evm::ConfigureEvm { + self.inner.evm_config() + } + + fn assemble_receipt( + &self, + tx: &TransactionSignedEcRecovered, + result: ExecutionResult, + cumulative_gas_used: u64, + ) -> Receipt { + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + deposit_nonce: None, + deposit_receipt_version: None, + } + } + + fn receipts_root( + 
&self, + _block_env: &BlockEnv, + execution_outcome: &ExecutionOutcome, + block_number: BlockNumber, + ) -> B256 { + execution_outcome + .optimism_receipts_root_slow( + block_number, + self.provider().chain_spec().as_ref(), + _block_env.timestamp.to::(), + ) + .expect("Block is present") + } +} + +/// Optimism specific errors, that extend [`EthApiError`]. +#[derive(Debug, thiserror::Error)] +pub enum OptimismEthApiError { + /// Thrown when calculating L1 gas fee. + #[error("failed to calculate l1 gas fee")] + L1BlockFeeError, + /// Thrown when calculating L1 gas used + #[error("failed to calculate l1 gas used")] + L1BlockGasError, +} + +impl ToRpcError for OptimismEthApiError { + fn to_rpc_error(&self) -> ErrorObject<'static> { + match self { + Self::L1BlockFeeError | Self::L1BlockGasError => internal_rpc_err(self.to_string()), + } + } +} + +impl From for EthApiError { + fn from(err: OptimismEthApiError) -> Self { + Self::other(err) + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs new file mode 100644 index 000000000000..d1a47da75853 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -0,0 +1,40 @@ +//! Support for building a pending block with transactions from local view of mempool. 
+ +use reth_evm::ConfigureEvm; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_rpc_eth_api::helpers::{LoadPendingBlock, SpawnBlocking}; +use reth_rpc_eth_types::PendingBlock; +use reth_transaction_pool::TransactionPool; + +use crate::EthApi; + +impl LoadPendingBlock + for EthApi +where + Self: SpawnBlocking, + Provider: BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, + Pool: TransactionPool, + EvmConfig: reth_evm::ConfigureEvm, +{ + #[inline] + fn provider( + &self, + ) -> impl BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory { + self.inner.provider() + } + + #[inline] + fn pool(&self) -> impl TransactionPool { + self.inner.pool() + } + + #[inline] + fn pending_block(&self) -> &tokio::sync::Mutex> { + self.inner.pending_block() + } + + #[inline] + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs new file mode 100644 index 000000000000..db1fee781fd3 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -0,0 +1,16 @@ +//! Builds an RPC receipt response w.r.t. data layout of network. + +use reth_rpc_eth_api::helpers::LoadReceipt; +use reth_rpc_eth_types::EthStateCache; + +use crate::EthApi; + +impl LoadReceipt for EthApi +where + Self: Send + Sync, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} diff --git a/crates/rpc/rpc/src/eth/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs similarity index 83% rename from crates/rpc/rpc/src/eth/signer.rs rename to crates/rpc/rpc/src/eth/helpers/signer.rs index cffaa01f2f44..a4cb726a2915 100644 --- a/crates/rpc/rpc/src/eth/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -1,49 +1,20 @@ //! An abstraction over ethereum signers. 
-use crate::eth::error::SignError; +use std::collections::HashMap; + use alloy_dyn_abi::TypedData; use reth_primitives::{ eip191_hash_message, sign_message, Address, Signature, TransactionSigned, B256, }; +use reth_rpc_eth_api::helpers::{signer::Result, EthSigner}; +use reth_rpc_eth_types::SignError; use reth_rpc_types::TypedTransactionRequest; - -use dyn_clone::DynClone; use reth_rpc_types_compat::transaction::to_primitive_transaction; use secp256k1::SecretKey; -use std::collections::HashMap; - -type Result = std::result::Result; - -/// An Ethereum Signer used via RPC. -#[async_trait::async_trait] -pub(crate) trait EthSigner: Send + Sync + DynClone { - /// Returns the available accounts for this signer. - fn accounts(&self) -> Vec
; - - /// Returns `true` whether this signer can sign for this address - fn is_signer_for(&self, addr: &Address) -> bool { - self.accounts().contains(addr) - } - - /// Returns the signature - async fn sign(&self, address: Address, message: &[u8]) -> Result; - - /// signs a transaction request using the given account in request - fn sign_transaction( - &self, - request: TypedTransactionRequest, - address: &Address, - ) -> Result; - - /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. - fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result; -} - -dyn_clone::clone_trait_object!(EthSigner); /// Holds developer keys -#[derive(Clone)] -pub(crate) struct DevSigner { +#[derive(Debug, Clone)] +pub struct DevSigner { addresses: Vec
, accounts: HashMap, } @@ -115,16 +86,18 @@ impl EthSigner for DevSigner { fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result { let encoded = payload.eip712_signing_hash().map_err(|_| SignError::InvalidTypedData)?; - // let b256 = encoded; self.sign_hash(encoded, address) } } #[cfg(test)] mod tests { - use super::*; - use reth_primitives::U256; use std::str::FromStr; + + use reth_primitives::U256; + + use super::*; + fn build_signer() -> DevSigner { let addresses = vec![]; let secret = @@ -136,8 +109,7 @@ mod tests { #[tokio::test] async fn test_sign_type_data() { - let eip_712_example = serde_json::json!( - r#"{ + let eip_712_example = r#"{ "types": { "EIP712Domain": [ { @@ -200,9 +172,8 @@ mod tests { }, "contents": "Hello, Bob!" } - }"# - ); - let data: TypedData = serde_json::from_value(eip_712_example).unwrap(); + }"#; + let data: TypedData = serde_json::from_str(eip_712_example).unwrap(); let signer = build_signer(); let sig = signer.sign_typed_data(Address::default(), &data).unwrap(); let expected = Signature { diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs new file mode 100644 index 000000000000..a93d662eaf6e --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -0,0 +1,65 @@ +use reth_chainspec::ChainInfo; +use reth_errors::{RethError, RethResult}; +use reth_evm::ConfigureEvm; +use reth_network_api::NetworkInfo; +use reth_primitives::{Address, U256, U64}; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_rpc_eth_api::helpers::EthApiSpec; +use reth_rpc_types::{SyncInfo, SyncStatus}; +use reth_transaction_pool::TransactionPool; + +use crate::EthApi; + +impl EthApiSpec for EthApi +where + Pool: TransactionPool + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, + Network: NetworkInfo + 'static, + EvmConfig: ConfigureEvm, +{ + /// Returns the current ethereum protocol 
version. + /// + /// Note: This returns an [`U64`], since this should return as hex string. + async fn protocol_version(&self) -> RethResult { + let status = self.network().network_status().await.map_err(RethError::other)?; + Ok(U64::from(status.protocol_version)) + } + + /// Returns the chain id + fn chain_id(&self) -> U64 { + U64::from(self.network().chain_id()) + } + + /// Returns the current info for the chain + fn chain_info(&self) -> RethResult { + Ok(self.provider().chain_info()?) + } + + fn accounts(&self) -> Vec
{ + self.inner.signers().read().iter().flat_map(|s| s.accounts()).collect() + } + + fn is_syncing(&self) -> bool { + self.network().is_syncing() + } + + /// Returns the [`SyncStatus`] of the network + fn sync_status(&self) -> RethResult { + let status = if self.is_syncing() { + let current_block = U256::from( + self.provider().chain_info().map(|info| info.best_number).unwrap_or_default(), + ); + SyncStatus::Info(SyncInfo { + starting_block: self.inner.starting_block(), + current_block, + highest_block: current_block, + warp_chunks_amount: None, + warp_chunks_processed: None, + }) + } else { + SyncStatus::None + }; + Ok(status) + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs new file mode 100644 index 000000000000..d3a99d2f83ab --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -0,0 +1,113 @@ +//! Contains RPC handler implementations specific to state. + +use reth_provider::StateProviderFactory; +use reth_transaction_pool::TransactionPool; + +use reth_rpc_eth_api::helpers::{EthState, LoadState, SpawnBlocking}; +use reth_rpc_eth_types::EthStateCache; + +use crate::EthApi; + +impl EthState for EthApi +where + Self: LoadState + SpawnBlocking, +{ + fn max_proof_window(&self) -> u64 { + self.eth_proof_window() + } +} + +impl LoadState for EthApi +where + Provider: StateProviderFactory, + Pool: TransactionPool, +{ + #[inline] + fn provider(&self) -> impl StateProviderFactory { + self.inner.provider() + } + + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } + + #[inline] + fn pool(&self) -> impl TransactionPool { + self.inner.pool() + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use reth_evm_ethereum::EthEvmConfig; + use reth_primitives::{ + constants::ETHEREUM_BLOCK_GAS_LIMIT, Address, StorageKey, StorageValue, U256, + }; + use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; + use reth_rpc_eth_api::helpers::EthState; + use 
reth_rpc_eth_types::{ + EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + }; + use reth_rpc_server_types::constants::{DEFAULT_ETH_PROOF_WINDOW, DEFAULT_PROOF_PERMITS}; + use reth_tasks::pool::BlockingTaskPool; + use reth_transaction_pool::test_utils::testing_pool; + + use super::*; + + #[tokio::test] + async fn test_storage() { + // === Noop === + let pool = testing_pool(); + let evm_config = EthEvmConfig::default(); + + let cache = EthStateCache::spawn(NoopProvider::default(), Default::default(), evm_config); + let eth_api = EthApi::new( + NoopProvider::default(), + pool.clone(), + (), + cache.clone(), + GasPriceOracle::new(NoopProvider::default(), Default::default(), cache.clone()), + ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_ETH_PROOF_WINDOW, + BlockingTaskPool::build().expect("failed to build tracing pool"), + FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), + evm_config, + None, + DEFAULT_PROOF_PERMITS, + ); + let address = Address::random(); + let storage = eth_api.storage_at(address, U256::ZERO.into(), None).await.unwrap(); + assert_eq!(storage, U256::ZERO.to_be_bytes()); + + // === Mock === + let mock_provider = MockEthProvider::default(); + let storage_value = StorageValue::from(1337); + let storage_key = StorageKey::random(); + let storage = HashMap::from([(storage_key, storage_value)]); + let account = ExtendedAccount::new(0, U256::ZERO).extend_storage(storage); + mock_provider.add_account(address, account); + + let cache = EthStateCache::spawn(mock_provider.clone(), Default::default(), evm_config); + let eth_api = EthApi::new( + mock_provider.clone(), + pool, + (), + cache.clone(), + GasPriceOracle::new(mock_provider, Default::default(), cache.clone()), + ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_ETH_PROOF_WINDOW, + BlockingTaskPool::build().expect("failed to build tracing pool"), + FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), + evm_config, + None, + DEFAULT_PROOF_PERMITS, + ); + + let storage_key: U256 = 
storage_key.into(); + let storage = eth_api.storage_at(address, storage_key.into(), None).await.unwrap(); + assert_eq!(storage, storage_value.to_be_bytes()); + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs new file mode 100644 index 000000000000..fe1ee9f13cf4 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -0,0 +1,17 @@ +//! Contains RPC handler implementations specific to tracing. + +use reth_evm::ConfigureEvm; +use reth_rpc_eth_api::helpers::{LoadState, Trace}; + +use crate::EthApi; + +impl Trace for EthApi +where + Self: LoadState, + EvmConfig: ConfigureEvm, +{ + #[inline] + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs new file mode 100644 index 000000000000..872af0cee451 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -0,0 +1,128 @@ +//! Contains RPC handler implementations specific to transactions + +use reth_provider::{BlockReaderIdExt, TransactionsProvider}; +use reth_rpc_eth_api::{ + helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, + RawTransactionForwarder, +}; +use reth_rpc_eth_types::EthStateCache; +use reth_transaction_pool::TransactionPool; + +use crate::EthApi; + +impl EthTransactions + for EthApi +where + Self: LoadTransaction, + Pool: TransactionPool + 'static, + Provider: BlockReaderIdExt, +{ + #[inline] + fn provider(&self) -> impl BlockReaderIdExt { + self.inner.provider() + } + + #[inline] + fn raw_tx_forwarder(&self) -> Option> { + self.inner.raw_tx_forwarder() + } + + #[inline] + fn signers(&self) -> &parking_lot::RwLock>> { + self.inner.signers() + } +} + +impl LoadTransaction + for EthApi +where + Self: SpawnBlocking, + Provider: TransactionsProvider, + Pool: TransactionPool, +{ + type Pool = Pool; + + #[inline] + fn provider(&self) -> impl reth_provider::TransactionsProvider { + 
self.inner.provider() + } + + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } + + #[inline] + fn pool(&self) -> &Self::Pool { + self.inner.pool() + } +} + +#[cfg(test)] +mod tests { + use reth_evm_ethereum::EthEvmConfig; + use reth_network_api::noop::NoopNetwork; + use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, hex_literal::hex, Bytes}; + use reth_provider::test_utils::NoopProvider; + use reth_rpc_eth_api::helpers::EthTransactions; + use reth_rpc_eth_types::{ + EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + }; + use reth_rpc_server_types::constants::{DEFAULT_ETH_PROOF_WINDOW, DEFAULT_PROOF_PERMITS}; + use reth_tasks::pool::BlockingTaskPool; + use reth_transaction_pool::{test_utils::testing_pool, TransactionPool}; + + use super::*; + + #[tokio::test] + async fn send_raw_transaction() { + let noop_provider = NoopProvider::default(); + let noop_network_provider = NoopNetwork::default(); + + let pool = testing_pool(); + + let evm_config = EthEvmConfig::default(); + let cache = EthStateCache::spawn(noop_provider, Default::default(), evm_config); + let fee_history_cache = + FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); + let eth_api = EthApi::new( + noop_provider, + pool.clone(), + noop_network_provider, + cache.clone(), + GasPriceOracle::new(noop_provider, Default::default(), cache.clone()), + ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_ETH_PROOF_WINDOW, + BlockingTaskPool::build().expect("failed to build tracing pool"), + fee_history_cache, + evm_config, + None, + DEFAULT_PROOF_PERMITS, + ); + + // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d + let tx_1 = Bytes::from(hex!("02f871018303579880850555633d1b82520894eee27662c2b8eba3cd936a23f039f3189633e4c887ad591c62bdaeb180c080a07ea72c68abfb8fca1bd964f0f99132ed9280261bdca3e549546c0205e800f7d0a05b4ef3039e9c9b9babc179a1878fb825b5aaf5aed2fa8744854150157b08d6f3")); + + let tx_1_result = 
eth_api.send_raw_transaction(tx_1).await.unwrap(); + assert_eq!( + pool.len(), + 1, + "expect 1 transactions in the pool, but pool size is {}", + pool.len() + ); + + // https://etherscan.io/tx/0x48816c2f32c29d152b0d86ff706f39869e6c1f01dc2fe59a3c1f9ecf39384694 + let tx_2 = Bytes::from(hex!("02f9043c018202b7843b9aca00850c807d37a08304d21d94ef1c6e67703c7bd7107eed8303fbe6ec2554bf6b881bc16d674ec80000b903c43593564c000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000063e2d99f00000000000000000000000000000000000000000000000000000000000000030b000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000065717fe021ea67801d1088cc80099004b05b64600000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002bc02aaa39b223fe8d0a0e5c4f27ead9083c756cc20001f4a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009e95fd5965f
d1f1a6f0d4600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000000000000000000000000428dca9537116148616a5a3e44035af17238fe9dc080a0c6ec1e41f5c0b9511c49b171ad4e04c6bb419c74d99fe9891d74126ec6e4e879a032069a753d7a2cfa158df95421724d24c0e9501593c09905abf3699b4a4405ce")); + + let tx_2_result = eth_api.send_raw_transaction(tx_2).await.unwrap(); + assert_eq!( + pool.len(), + 2, + "expect 2 transactions in the pool, but pool size is {}", + pool.len() + ); + + assert!(pool.get(&tx_1_result).is_some(), "tx1 not found in the pool"); + assert!(pool.get(&tx_2_result).is_some(), "tx2 not found in the pool"); + } +} diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 8d8e982c2c79..4e6a0cbb8c75 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -1,28 +1,17 @@ -//! `eth` namespace handler implementation. +//! Sever implementation of `eth` namespace API. -mod api; pub mod bundle; -pub mod cache; -pub mod error; -mod filter; -pub mod gas_oracle; -mod id_provider; -mod logs_utils; -mod pubsub; -pub mod revm_utils; -mod signer; -pub mod traits; -pub(crate) mod utils; - -#[cfg(feature = "optimism")] -pub mod optimism; - -pub use api::{ - fee_history::{fee_history_cache_new_blocks_task, FeeHistoryCache, FeeHistoryCacheConfig}, - EthApi, EthApiSpec, EthTransactions, TransactionSource, RPC_DEFAULT_GAS_CAP, -}; +pub mod core; +pub mod filter; +pub mod helpers; +pub mod pubsub; +/// Implementation of `eth` namespace API. 
pub use bundle::EthBundle; +pub use core::EthApi; pub use filter::{EthFilter, EthFilterConfig}; -pub use id_provider::EthSubscriptionIdProvider; pub use pubsub::EthPubSub; + +pub use helpers::signer::DevSigner; + +pub use reth_rpc_eth_api::RawTransactionForwarder; diff --git a/crates/rpc/rpc/src/eth/optimism.rs b/crates/rpc/rpc/src/eth/optimism.rs deleted file mode 100644 index fb1665b95785..000000000000 --- a/crates/rpc/rpc/src/eth/optimism.rs +++ /dev/null @@ -1,32 +0,0 @@ -//! Optimism specific types. - -use jsonrpsee::types::ErrorObject; -use reth_rpc_types::ToRpcError; - -use crate::{eth::error::EthApiError, result::internal_rpc_err}; - -/// Eth Optimism Api Error -#[cfg(feature = "optimism")] -#[derive(Debug, thiserror::Error)] -pub enum OptimismEthApiError { - /// Thrown when calculating L1 gas fee - #[error("failed to calculate l1 gas fee")] - L1BlockFeeError, - /// Thrown when calculating L1 gas used - #[error("failed to calculate l1 gas used")] - L1BlockGasError, -} - -impl ToRpcError for OptimismEthApiError { - fn to_rpc_error(&self) -> ErrorObject<'static> { - match self { - Self::L1BlockFeeError | Self::L1BlockGasError => internal_rpc_err(self.to_string()), - } - } -} - -impl From for EthApiError { - fn from(err: OptimismEthApiError) -> Self { - Self::other(err) - } -} diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 3b1c95cd451b..fa04250a9a96 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -1,9 +1,7 @@ //! 
`eth_` `PubSub` RPC handler implementation -use crate::{ - eth::logs_utils, - result::{internal_rpc_err, invalid_params_rpc_err}, -}; +use std::sync::Arc; + use futures::StreamExt; use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, @@ -11,7 +9,9 @@ use jsonrpsee::{ use reth_network_api::NetworkInfo; use reth_primitives::{IntoRecoveredTransaction, TxHash}; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; -use reth_rpc_api::EthPubSubApiServer; +use reth_rpc_eth_api::pubsub::EthPubSubApiServer; +use reth_rpc_eth_types::logs_utils; +use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_rpc_types::{ pubsub::{ Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, @@ -22,7 +22,6 @@ use reth_rpc_types::{ use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::{NewTransactionEvent, TransactionPool}; use serde::Serialize; -use std::sync::Arc; use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, Stream, @@ -163,7 +162,7 @@ where BroadcastStream::new(pubsub.chain_events.subscribe_to_canonical_state()); // get current sync status let mut initial_sync_status = pubsub.network.is_syncing(); - let current_sub_res = pubsub.sync_status(initial_sync_status).await; + let current_sub_res = pubsub.sync_status(initial_sync_status); // send the current status immediately let msg = SubscriptionMessage::from_json(¤t_sub_res) @@ -180,7 +179,7 @@ where initial_sync_status = current_syncing; // send a new message now that the status changed - let sync_status = pubsub.sync_status(current_syncing).await; + let sync_status = pubsub.sync_status(current_syncing); let msg = SubscriptionMessage::from_json(&sync_status) .map_err(SubscriptionSerializeError::new)?; if accepted_sink.send(msg).await.is_err() { @@ -197,10 +196,10 @@ where /// Helper to convert a serde error into an [`ErrorObject`] #[derive(Debug, 
thiserror::Error)] #[error("Failed to serialize subscription item: {0}")] -pub(crate) struct SubscriptionSerializeError(#[from] serde_json::Error); +pub struct SubscriptionSerializeError(#[from] serde_json::Error); impl SubscriptionSerializeError { - pub(crate) const fn new(err: serde_json::Error) -> Self { + const fn new(err: serde_json::Error) -> Self { Self(err) } } @@ -271,7 +270,7 @@ where Provider: BlockReader + 'static, { /// Returns the current sync status for the `syncing` subscription - async fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult { + fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult { if is_syncing { let current_block = self.provider.chain_info().map(|info| info.best_number).unwrap_or_default(); diff --git a/crates/rpc/rpc/src/eth/traits.rs b/crates/rpc/rpc/src/eth/traits.rs deleted file mode 100644 index 0f73ded3c82f..000000000000 --- a/crates/rpc/rpc/src/eth/traits.rs +++ /dev/null @@ -1,13 +0,0 @@ -//! Additional helper traits that allow for more customization. - -use crate::eth::error::EthResult; -use std::fmt; - -/// A trait that allows for forwarding raw transactions. -/// -/// For example to a sequencer. 
-#[async_trait::async_trait] -pub trait RawTransactionForwarder: fmt::Debug + Send + Sync + 'static { - /// Forwards raw transaction bytes for `eth_sendRawTransaction` - async fn forward_raw_transaction(&self, raw: &[u8]) -> EthResult<()>; -} diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index 17dc8fcb809f..eec14981bf57 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -46,7 +46,7 @@ mod web3; pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; -pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider}; +pub use eth::{EthApi, EthBundle, EthFilter, EthPubSub}; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; @@ -54,4 +54,3 @@ pub use rpc::RPCApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; pub use web3::Web3Api; -pub mod result; diff --git a/crates/rpc/rpc/src/net.rs b/crates/rpc/rpc/src/net.rs index 8e6615a281bf..79e85ac48e02 100644 --- a/crates/rpc/rpc/src/net.rs +++ b/crates/rpc/rpc/src/net.rs @@ -1,8 +1,8 @@ -use crate::eth::EthApiSpec; use jsonrpsee::core::RpcResult as Result; use reth_network_api::PeersInfo; use reth_primitives::U64; use reth_rpc_api::NetApiServer; +use reth_rpc_eth_api::helpers::EthApiSpec; use reth_rpc_types::PeerCount; /// `Net` API implementation. 
diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index ec472be4d6e9..b20ec0e1c723 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,20 +1,27 @@ use alloy_primitives::Bytes; use async_trait::async_trait; +use futures::future::BoxFuture; use jsonrpsee::core::RpcResult; -use revm_inspectors::transfer::{TransferInspector, TransferKind}; -use revm_primitives::ExecutionResult; - use reth_primitives::{Address, BlockId, BlockNumberOrTag, TxHash, B256}; use reth_rpc_api::{EthApiServer, OtterscanServer}; +use reth_rpc_eth_api::helpers::TraceExt; +use reth_rpc_eth_types::EthApiError; +use reth_rpc_server_types::result::internal_rpc_err; use reth_rpc_types::{ - trace::otterscan::{ - BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions, - OtsReceipt, OtsTransactionReceipt, TraceEntry, TransactionsWithReceipts, + trace::{ + otterscan::{ + BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions, + OtsReceipt, OtsTransactionReceipt, TraceEntry, TransactionsWithReceipts, + }, + parity::{Action, CreateAction, CreateOutput, TraceOutput}, }, - BlockTransactions, Transaction, + BlockTransactions, Header, }; - -use crate::{eth::EthTransactions, result::internal_rpc_err}; +use revm_inspectors::{ + tracing::{types::CallTraceNode, TracingInspectorConfig}, + transfer::{TransferInspector, TransferKind}, +}; +use revm_primitives::ExecutionResult; const API_LEVEL: u64 = 8; @@ -24,6 +31,41 @@ pub struct OtterscanApi { eth: Eth, } +/// Performs a binary search within a given block range to find the desired block number. +/// +/// The binary search is performed by calling the provided asynchronous `check` closure on the +/// blocks of the range. The closure should return a future representing the result of performing +/// the desired logic at a given block. 
The future resolves to an `bool` where: +/// - `true` indicates that the condition has been matched, but we can try to find a lower block to +/// make the condition more matchable. +/// - `false` indicates that the condition not matched, so the target is not present in the current +/// block and should continue searching in a higher range. +/// +/// Args: +/// - `low`: The lower bound of the block range (inclusive). +/// - `high`: The upper bound of the block range (inclusive). +/// - `check`: A closure that performs the desired logic at a given block. +async fn binary_search<'a, F>(low: u64, high: u64, check: F) -> RpcResult +where + F: Fn(u64) -> BoxFuture<'a, RpcResult>, +{ + let mut low = low; + let mut high = high; + let mut num = high; + + while low <= high { + let mid = (low + high) / 2; + if check(mid).await? { + high = mid - 1; + num = mid; + } else { + low = mid + 1 + } + } + + Ok(num) +} + impl OtterscanApi { /// Creates a new instance of `Otterscan`. pub const fn new(eth: Eth) -> Self { @@ -34,8 +76,13 @@ impl OtterscanApi { #[async_trait] impl OtterscanServer for OtterscanApi where - Eth: EthApiServer + EthTransactions, + Eth: EthApiServer + TraceExt + 'static, { + /// Handler for `{ots,erigon}_getHeaderByNumber` + async fn get_header_by_number(&self, block_number: u64) -> RpcResult> { + self.eth.header_by_number(BlockNumberOrTag::Number(block_number)).await + } + /// Handler for `ots_hasCode` async fn has_code(&self, address: Address, block_number: Option) -> RpcResult { self.eth.get_code(address, block_number).await.map(|code| !code.is_empty()) @@ -90,16 +137,39 @@ where } /// Handler for `ots_traceTransaction` - async fn trace_transaction(&self, _tx_hash: TxHash) -> RpcResult { - Err(internal_rpc_err("unimplemented")) + async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult>> { + let traces = self + .eth + .spawn_trace_transaction_in_block( + tx_hash, + TracingInspectorConfig::default_parity(), + move |_tx_info, inspector, _, _| 
Ok(inspector.into_traces().into_nodes()), + ) + .await? + .map(|traces| { + traces + .into_iter() + .map(|CallTraceNode { trace, .. }| TraceEntry { + r#type: if trace.is_selfdestruct() { + "SELFDESTRUCT".to_string() + } else { + trace.kind.to_string() + }, + depth: trace.depth as u32, + from: trace.caller, + to: trace.address, + value: trace.value, + input: trace.data, + output: trace.output, + }) + .collect::>() + }); + Ok(traces) } /// Handler for `ots_getBlockDetails` - async fn get_block_details( - &self, - block_number: BlockNumberOrTag, - ) -> RpcResult> { - let block = self.eth.block_by_number(block_number, true).await?; + async fn get_block_details(&self, block_number: u64) -> RpcResult> { + let block = self.eth.block_by_number(BlockNumberOrTag::Number(block_number), true).await?; Ok(block.map(Into::into)) } @@ -112,11 +182,12 @@ where /// Handler for `getBlockTransactions` async fn get_block_transactions( &self, - block_number: BlockNumberOrTag, + block_number: u64, page_number: usize, page_size: usize, ) -> RpcResult { // retrieve full block and its receipts + let block_number = BlockNumberOrTag::Number(block_number); let block = self.eth.block_by_number(block_number, true); let receipts = self.eth.block_receipts(BlockId::Number(block_number)); let (block, receipts) = futures::try_join!(block, receipts)?; @@ -180,7 +251,7 @@ where async fn search_transactions_before( &self, _address: Address, - _block_number: BlockNumberOrTag, + _block_number: u64, _page_size: usize, ) -> RpcResult { Err(internal_rpc_err("unimplemented")) @@ -190,7 +261,7 @@ where async fn search_transactions_after( &self, _address: Address, - _block_number: BlockNumberOrTag, + _block_number: u64, _page_size: usize, ) -> RpcResult { Err(internal_rpc_err("unimplemented")) @@ -199,14 +270,130 @@ where /// Handler for `getTransactionBySenderAndNonce` async fn get_transaction_by_sender_and_nonce( &self, - _sender: Address, - _nonce: u64, - ) -> RpcResult> { - 
Err(internal_rpc_err("unimplemented")) + sender: Address, + nonce: u64, + ) -> RpcResult> { + // Check if the sender is a contract + if self.has_code(sender, None).await? { + return Ok(None) + } + + let highest = + EthApiServer::transaction_count(&self.eth, sender, None).await?.saturating_to::(); + + // If the nonce is higher or equal to the highest nonce, the transaction is pending or not + // exists. + if nonce >= highest { + return Ok(None) + } + + // perform a binary search over the block range to find the block in which the sender's + // nonce reached the requested nonce. + let num = binary_search(1, self.eth.block_number()?.saturating_to(), |mid| { + Box::pin(async move { + let mid_nonce = + EthApiServer::transaction_count(&self.eth, sender, Some(mid.into())) + .await? + .saturating_to::(); + + // The `transaction_count` returns the `nonce` after the transaction was + // executed, which is the state of the account after the block, and we need to find + // the transaction whose nonce is the pre-state, so need to compare with `nonce`(no + // equal). + Ok(mid_nonce > nonce) + }) + }) + .await?; + + let Some(BlockTransactions::Full(transactions)) = + self.eth.block_by_number(num.into(), true).await?.map(|block| block.inner.transactions) + else { + return Err(EthApiError::UnknownBlockNumber.into()); + }; + + Ok(transactions + .into_iter() + .find(|tx| tx.from == sender && tx.nonce == nonce) + .map(|tx| tx.hash)) } /// Handler for `getContractCreator` - async fn get_contract_creator(&self, _address: Address) -> RpcResult> { - Err(internal_rpc_err("unimplemented")) + async fn get_contract_creator(&self, address: Address) -> RpcResult> { + if !self.has_code(address, None).await? 
{ + return Ok(None); + } + + let num = binary_search(1, self.eth.block_number()?.saturating_to(), |mid| { + Box::pin( + async move { Ok(!self.eth.get_code(address, Some(mid.into())).await?.is_empty()) }, + ) + }) + .await?; + + let traces = self + .eth + .trace_block_with( + num.into(), + TracingInspectorConfig::default_parity(), + |tx_info, inspector, _, _, _| { + Ok(inspector.into_parity_builder().into_localized_transaction_traces(tx_info)) + }, + ) + .await? + .map(|traces| { + traces + .into_iter() + .flatten() + .map(|tx_trace| { + let trace = tx_trace.trace; + Ok(match (trace.action, trace.result, trace.error) { + ( + Action::Create(CreateAction { from: creator, .. }), + Some(TraceOutput::Create(CreateOutput { + address: contract, .. + })), + None, + ) if contract == address => Some(ContractCreator { + hash: tx_trace + .transaction_hash + .ok_or_else(|| EthApiError::TransactionNotFound)?, + creator, + }), + _ => None, + }) + }) + .filter_map(Result::transpose) + .collect::, EthApiError>>() + }) + .transpose()?; + + // A contract maybe created and then destroyed in multiple transactions, here we + // return the first found transaction, this behavior is consistent with etherscan's + let found = traces.and_then(|traces| traces.first().cloned()); + Ok(found) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_binary_search() { + // in the middle + let num = binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 5) })).await; + assert_eq!(num, Ok(5)); + + // in the upper + let num = binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 7) })).await; + assert_eq!(num, Ok(7)); + + // in the lower + let num = binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 1) })).await; + assert_eq!(num, Ok(1)); + + // high than the upper + let num = binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 11) })).await; + assert_eq!(num, Ok(10)); } } diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 
624523567bde..bfb56075f182 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -1,12 +1,13 @@ -use crate::eth::error::{EthApiError, EthResult}; +use std::{collections::HashMap, future::Future, sync::Arc}; + use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_errors::RethResult; use reth_primitives::{Address, BlockId, U256}; use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; use reth_rpc_api::RethApiServer; +use reth_rpc_eth_types::{EthApiError, EthResult}; use reth_tasks::TaskSpawner; -use std::{collections::HashMap, future::Future, sync::Arc}; use tokio::sync::oneshot; /// `reth` API implementation. diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index a16603807692..c7fc2e99e381 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,18 +1,24 @@ -use crate::eth::{ - error::{EthApiError, EthResult}, - revm_utils::{prepare_call_env, EvmOverrides}, - utils::recover_raw_transaction, - EthTransactions, -}; +use std::{collections::HashSet, sync::Arc}; + use async_trait::async_trait; use jsonrpsee::core::RpcResult as Result; -use reth_consensus_common::calc::{base_block_reward, block_reward}; -use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, SealedHeader, B256, U256}; +use reth_chainspec::EthereumHardforks; +use reth_consensus_common::calc::{ + base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, +}; +use reth_evm::ConfigureEvmEnv; +use reth_primitives::{BlockId, Bytes, Header, B256, U256}; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; +use reth_rpc_eth_api::helpers::{Call, TraceExt}; +use reth_rpc_eth_types::{ + error::{EthApiError, EthResult}, + revm_utils::prepare_call_env, + utils::recover_raw_transaction, +}; use reth_rpc_types::{ - state::StateOverride, + state::{EvmOverrides, 
StateOverride}, trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, @@ -30,7 +36,6 @@ use revm_inspectors::{ opcode::OpcodeGasInspector, tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, }; -use std::{collections::HashSet, sync::Arc}; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `trace` API implementation. @@ -72,7 +77,7 @@ impl TraceApi { impl TraceApi where Provider: BlockReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + 'static, - Eth: EthTransactions + 'static, + Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. pub async fn trace_call(&self, trace_request: TraceCallRequest) -> EthResult { @@ -84,6 +89,10 @@ where let this = self.clone(); self.eth_api() .spawn_with_call_at(trace_request.call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', see + // + let db = db.0; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let trace_res = inspector.into_parity_builder().into_trace_results_with_state( &res, @@ -105,8 +114,12 @@ where let tx = recover_raw_transaction(tx)?; let (cfg, block, at) = self.inner.eth_api.evm_env_at(block_id.unwrap_or_default()).await?; - let tx = tx_env_with_recovered(&tx.into_ecrecovered_transaction()); - let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx); + + let env = EnvWithHandlerCfg::new_with_cfg_env( + cfg, + block, + Call::evm_config(self.eth_api()).tx_env(&tx.into_ecrecovered_transaction()), + ); let config = TracingInspectorConfig::from_parity_config(&trace_types); @@ -239,7 +252,7 @@ where filter: TraceFilter, ) -> EthResult> { let matcher = filter.matcher(); - let TraceFilter { from_block, to_block, after: _after, count: _count, .. } = filter; + let TraceFilter { from_block, to_block, .. 
} = filter; let start = from_block.unwrap_or(0); let end = if let Some(to_block) = to_block { to_block @@ -247,6 +260,12 @@ where self.provider().best_block_number()? }; + if start > end { + return Err(EthApiError::InvalidParams( + "invalid parameters: fromBlock cannot be greater than toBlock".to_string(), + )) + } + // ensure that the range is not too large, since we need to fetch all blocks in the range let distance = end.saturating_sub(start); if distance > 100 { @@ -260,7 +279,7 @@ where // find relevant blocks to trace let mut target_blocks = Vec::new(); - for block in blocks { + for block in &blocks { let mut transaction_indices = HashSet::new(); let mut highest_matching_index = 0; for (tx_idx, tx) in block.body.iter().enumerate() { @@ -302,11 +321,26 @@ where } let block_traces = futures::future::try_join_all(block_traces).await?; - let all_traces = block_traces + let mut all_traces = block_traces .into_iter() .flatten() .flat_map(|traces| traces.into_iter().flatten().flat_map(|traces| traces.into_iter())) - .collect(); + .collect::>(); + + // add reward traces for all blocks + for block in &blocks { + if let Some(base_block_reward) = self.calculate_base_block_reward(&block.header)? { + all_traces.extend(self.extract_reward_traces( + &block.header, + &block.ommers, + base_block_reward, + )); + } else { + // no block reward, means we're past the Paris hardfork and don't expect any rewards + // because the blocks in ascending order + break + } + } Ok(all_traces) } @@ -349,43 +383,19 @@ where }, ); - let block = self.inner.eth_api.block_by_id(block_id); + let block = self.inner.eth_api.block(block_id); let (maybe_traces, maybe_block) = futures::try_join!(traces, block)?; let mut maybe_traces = maybe_traces.map(|traces| traces.into_iter().flatten().collect::>()); if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) { - if let Some(header_td) = self.provider().header_td(&block.header.hash())? 
{ - if let Some(base_block_reward) = base_block_reward( - self.provider().chain_spec().as_ref(), - block.header.number, - block.header.difficulty, - header_td, - ) { - traces.push(reward_trace( - &block.header, - RewardAction { - author: block.header.beneficiary, - reward_type: RewardType::Block, - value: U256::from(base_block_reward), - }, - )); - - if !block.ommers.is_empty() { - traces.push(reward_trace( - &block.header, - RewardAction { - author: block.header.beneficiary, - reward_type: RewardType::Uncle, - value: U256::from( - block_reward(base_block_reward, block.ommers.len()) - - base_block_reward, - ), - }, - )); - } - } + if let Some(base_block_reward) = self.calculate_base_block_reward(&block.header)? { + traces.extend(self.extract_reward_traces( + &block.header, + &block.ommers, + base_block_reward, + )); } } @@ -456,7 +466,7 @@ where let res = self .inner .eth_api - .trace_block_with_inspector( + .trace_block_inspector( block_id, OpcodeGasInspector::default, move |tx_info, inspector, _res, _, _| { @@ -471,21 +481,85 @@ where let Some(transactions) = res else { return Ok(None) }; - let Some(block) = self.inner.eth_api.block_by_id(block_id).await? else { return Ok(None) }; + let Some(block) = self.inner.eth_api.block(block_id).await? 
else { return Ok(None) }; Ok(Some(BlockOpcodeGas { block_hash: block.hash(), - block_number: block.number, + block_number: block.header.number, transactions, })) } + + /// Calculates the base block reward for the given block: + /// + /// - if Paris hardfork is activated, no block rewards are given + /// - if Paris hardfork is not activated, calculate block rewards with block number only + /// - if Paris hardfork is unknown, calculate block rewards with block number and ttd + fn calculate_base_block_reward(&self, header: &Header) -> EthResult> { + let chain_spec = self.provider().chain_spec(); + let is_paris_activated = chain_spec.is_paris_active_at_block(header.number); + + Ok(match is_paris_activated { + Some(true) => None, + Some(false) => Some(base_block_reward_pre_merge(&chain_spec, header.number)), + None => { + // if Paris hardfork is unknown, we need to fetch the total difficulty at the + // block's height and check if it is pre-merge to calculate the base block reward + if let Some(header_td) = self.provider().header_td_by_number(header.number)? 
{ + base_block_reward( + chain_spec.as_ref(), + header.number, + header.difficulty, + header_td, + ) + } else { + None + } + } + }) + } + + /// Extracts the reward traces for the given block: + /// - block reward + /// - uncle rewards + fn extract_reward_traces( + &self, + header: &Header, + ommers: &[Header], + base_block_reward: u128, + ) -> Vec { + let mut traces = Vec::with_capacity(ommers.len() + 1); + + let block_reward = block_reward(base_block_reward, ommers.len()); + traces.push(reward_trace( + header, + RewardAction { + author: header.beneficiary, + reward_type: RewardType::Block, + value: U256::from(block_reward), + }, + )); + + for uncle in ommers { + let uncle_reward = ommer_reward(base_block_reward, header.number, uncle.number); + traces.push(reward_trace( + header, + RewardAction { + author: uncle.beneficiary, + reward_type: RewardType::Uncle, + value: U256::from(uncle_reward), + }, + )); + } + traces + } } #[async_trait] impl TraceApiServer for TraceApi where Provider: BlockReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + 'static, - Eth: EthTransactions + 'static, + Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. /// @@ -622,9 +696,9 @@ struct TraceApiInner { /// Helper to construct a [`LocalizedTransactionTrace`] that describes a reward to the block /// beneficiary. 
-fn reward_trace(header: &SealedHeader, reward: RewardAction) -> LocalizedTransactionTrace { +fn reward_trace(header: &Header, reward: RewardAction) -> LocalizedTransactionTrace { LocalizedTransactionTrace { - block_hash: Some(header.hash()), + block_hash: Some(header.hash_slow()), block_number: Some(header.number), transaction_hash: None, transaction_position: None, diff --git a/crates/rpc/rpc/src/web3.rs b/crates/rpc/rpc/src/web3.rs index 4ed94ac85523..787604e25e23 100644 --- a/crates/rpc/rpc/src/web3.rs +++ b/crates/rpc/rpc/src/web3.rs @@ -1,9 +1,9 @@ -use crate::result::ToRpcResult; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_network_api::NetworkInfo; use reth_primitives::{keccak256, Bytes, B256}; use reth_rpc_api::Web3ApiServer; +use reth_rpc_server_types::ToRpcResult; /// `web3` API implementation. /// diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index cb58818b503c..a5db5b9fb20c 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-db-api.workspace = true reth-static-file.workspace = true @@ -22,6 +22,9 @@ reth-consensus.workspace = true reth-prune.workspace = true reth-errors.workspace = true reth-stages-types.workspace = true +reth-static-file-types.workspace = true + +alloy-primitives.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index ab525f8f5331..2f113f2fa813 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -1,9 +1,11 @@ use crate::PipelineEvent; +use alloy_primitives::{BlockNumber, TxNumber}; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, DatabaseError, RethError}; use reth_network_p2p::error::DownloadError; -use reth_primitives::{BlockNumber, SealedHeader, 
StaticFileSegment, TxNumber}; +use reth_primitives_traits::SealedHeader; use reth_provider::ProviderError; +use reth_static_file_types::StaticFileSegment; use thiserror::Error; use tokio::sync::broadcast::error::SendError; diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index 46f23d79311c..cbb407c4d0e4 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -1,5 +1,6 @@ use crate::{metrics::SyncMetrics, StageCheckpoint, StageId}; -use reth_primitives::{constants::MGAS_TO_GAS, BlockNumber}; +use alloy_primitives::BlockNumber; +use reth_primitives_traits::constants::MEGAGAS; use std::{ future::Future, pin::Pin, @@ -82,7 +83,7 @@ impl MetricsListener { } } MetricEvent::ExecutionStageGas { gas } => { - self.sync_metrics.execution_stage.mgas_processed_total.increment(gas / MGAS_TO_GAS) + self.sync_metrics.execution_stage.mgas_processed_total.increment(gas / MEGAGAS) } } } diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index ada5ffd3734a..1e83af4c3c82 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -1,6 +1,6 @@ use crate::{pipeline::BoxedStage, MetricEventsSender, Pipeline, Stage, StageId, StageSet}; +use alloy_primitives::{BlockNumber, B256}; use reth_db_api::database::Database; -use reth_primitives::{BlockNumber, B256}; use reth_provider::ProviderFactory; use reth_static_file::StaticFileProducer; use tokio::sync::watch; diff --git a/crates/stages/api/src/pipeline/ctrl.rs b/crates/stages/api/src/pipeline/ctrl.rs index 83dbe0cf394c..8fc64c2ab708 100644 --- a/crates/stages/api/src/pipeline/ctrl.rs +++ b/crates/stages/api/src/pipeline/ctrl.rs @@ -1,4 +1,5 @@ -use reth_primitives::{BlockNumber, SealedHeader}; +use alloy_primitives::BlockNumber; +use reth_primitives_traits::SealedHeader; /// Determines the control flow during pipeline execution. 
/// diff --git a/crates/stages/api/src/pipeline/event.rs b/crates/stages/api/src/pipeline/event.rs index 9bbaaa79b468..879725886cf9 100644 --- a/crates/stages/api/src/pipeline/event.rs +++ b/crates/stages/api/src/pipeline/event.rs @@ -2,7 +2,7 @@ use crate::{ stage::{ExecOutput, UnwindInput, UnwindOutput}, StageCheckpoint, StageId, }; -use reth_primitives::BlockNumber; +use alloy_primitives::BlockNumber; use std::fmt::{Display, Formatter}; /// An event emitted by a [Pipeline][crate::Pipeline]. diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 9d5b8b9ba01b..6fa5cb9500c0 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -2,13 +2,11 @@ mod ctrl; mod event; pub use crate::pipeline::ctrl::ControlFlow; use crate::{PipelineTarget, StageCheckpoint, StageId}; +use alloy_primitives::{BlockNumber, B256}; pub use event::*; use futures_util::Future; use reth_db_api::database::Database; -use reth_primitives::{ - constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, static_file::HighestStaticFiles, BlockNumber, - B256, -}; +use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ providers::StaticFileWriter, FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, @@ -237,38 +235,21 @@ where /// Run [static file producer](StaticFileProducer) and [pruner](reth_prune::Pruner) to **move** /// all data from the database to static files for corresponding - /// [segments](reth_primitives::static_file::StaticFileSegment), according to their [stage + /// [segments](reth_static_file_types::StaticFileSegment), according to their [stage /// checkpoints](StageCheckpoint): - /// - [`StaticFileSegment::Headers`](reth_primitives::static_file::StaticFileSegment::Headers) - /// -> [`StageId::Headers`] - /// - 
[`StaticFileSegment::Receipts`](reth_primitives::static_file::StaticFileSegment::Receipts) - /// -> [`StageId::Execution`] - /// - [`StaticFileSegment::Transactions`](reth_primitives::static_file::StaticFileSegment::Transactions) + /// - [`StaticFileSegment::Headers`](reth_static_file_types::StaticFileSegment::Headers) -> + /// [`StageId::Headers`] + /// - [`StaticFileSegment::Receipts`](reth_static_file_types::StaticFileSegment::Receipts) -> + /// [`StageId::Execution`] + /// - [`StaticFileSegment::Transactions`](reth_static_file_types::StaticFileSegment::Transactions) /// -> [`StageId::Bodies`] /// /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. pub fn move_to_static_files(&self) -> RethResult<()> { - let static_file_producer = self.static_file_producer.lock(); - // Copies data from database to static files - let lowest_static_file_height = { - let provider = self.provider_factory.provider()?; - let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] - .into_iter() - .map(|stage| { - provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number)) - }) - .collect::, _>>()?; - - let targets = static_file_producer.get_static_file_targets(HighestStaticFiles { - headers: stages_checkpoints[0], - receipts: stages_checkpoints[1], - transactions: stages_checkpoints[2], - })?; - static_file_producer.run(targets)?; - stages_checkpoints.into_iter().min().expect("exists") - }; + let lowest_static_file_height = + self.static_file_producer.lock().copy_to_static_files()?.min(); // Deletes data which has been copied to static files. 
if let Some(prune_tip) = lowest_static_file_height { diff --git a/crates/stages/api/src/pipeline/progress.rs b/crates/stages/api/src/pipeline/progress.rs index 4bb1848543a9..4138f95ddf59 100644 --- a/crates/stages/api/src/pipeline/progress.rs +++ b/crates/stages/api/src/pipeline/progress.rs @@ -1,5 +1,5 @@ use crate::{util::opt, ControlFlow}; -use reth_primitives::BlockNumber; +use alloy_primitives::BlockNumber; #[derive(Debug, Default)] pub(crate) struct PipelineProgress { diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs index acdae56e3568..279d8b38d5fd 100644 --- a/crates/stages/api/src/stage.rs +++ b/crates/stages/api/src/stage.rs @@ -1,6 +1,6 @@ use crate::{error::StageError, StageCheckpoint, StageId}; +use alloy_primitives::{BlockNumber, TxNumber}; use reth_db_api::database::Database; -use reth_primitives::{BlockNumber, TxNumber}; use reth_provider::{BlockReader, DatabaseProviderRW, ProviderError, TransactionsProvider}; use std::{ cmp::{max, min}, diff --git a/crates/stages/api/src/test_utils.rs b/crates/stages/api/src/test_utils.rs index 1495c54b0601..8d76cee31bef 100644 --- a/crates/stages/api/src/test_utils.rs +++ b/crates/stages/api/src/test_utils.rs @@ -16,7 +16,7 @@ pub struct TestStage { } impl TestStage { - pub fn new(id: StageId) -> Self { + pub const fn new(id: StageId) -> Self { Self { id, exec_outputs: VecDeque::new(), unwind_outputs: VecDeque::new() } } diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 44e344f8aa58..5d8ed3d5286c 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec = { workspace = true, optional = true } reth-codecs.workspace = true reth-config.workspace = true reth-consensus.workspace = true @@ -23,11 +24,13 @@ reth-evm.workspace = true reth-exex.workspace = true reth-network-p2p.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = 
true reth-provider.workspace = true +reth-execution-types.workspace = true reth-prune-types.workspace = true reth-storage-errors.workspace = true reth-revm.workspace = true -reth-stages-api = { workspace = true, features = ["test-utils"] } +reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } @@ -48,6 +51,7 @@ tempfile = { workspace = true, optional = true } [dev-dependencies] # reth +reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils", "mdbx"] } reth-evm-ethereum.workspace = true @@ -85,6 +89,7 @@ pprof = { workspace = true, features = [ [features] test-utils = [ + "dep:reth-chainspec", "reth-network-p2p/test-utils", "reth-db/test-utils", "reth-provider/test-utils", diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index de08526bec4a..0f2dd2acf692 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -139,7 +139,11 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let offset = transitions.len() as u64; db.insert_changesets(transitions, None).unwrap(); - db.commit(|tx| Ok(updates.flush(tx)?)).unwrap(); + db.commit(|tx| { + updates.write_to_database(tx)?; + Ok(()) + }) + .unwrap(); let (transitions, final_state) = random_changeset_range( &mut rng, diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index 4e60e168a1bb..10b2e1976db5 100644 --- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -17,7 +17,8 @@ //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! # use reth_network_p2p::test_utils::{TestBodiesClient, TestHeadersClient}; //! # use reth_evm_ethereum::execute::EthExecutorProvider; -//! # use reth_primitives::{MAINNET, B256}; +//! 
# use reth_primitives::B256; +//! # use reth_chainspec::MAINNET; //! # use reth_prune_types::PruneModes; //! # use reth_network_peers::PeerId; //! # use reth_stages::Pipeline; @@ -26,7 +27,6 @@ //! # use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::ProviderFactory; //! # use reth_provider::StaticFileProviderFactory; -//! # use reth_provider::HeaderSyncMode; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::StageConfig; @@ -57,7 +57,7 @@ //! .with_tip_sender(tip_tx) //! .add_stages(DefaultStages::new( //! provider_factory.clone(), -//! HeaderSyncMode::Tip(tip_rx), +//! tip_rx, //! consensus, //! headers_downloader, //! bodies_downloader, diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 8c82902bf2a4..d5d132810daa 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -12,7 +12,7 @@ //! ```no_run //! # use reth_stages::Pipeline; //! # use reth_stages::sets::{OfflineStages}; -//! # use reth_primitives::MAINNET; +//! # use reth_chainspec::MAINNET; //! # use reth_prune_types::PruneModes; //! # use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::StaticFileProviderFactory; @@ -46,9 +46,11 @@ use reth_consensus::Consensus; use reth_db_api::database::Database; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}; -use reth_provider::{HeaderSyncGapProvider, HeaderSyncMode}; +use reth_primitives::B256; +use reth_provider::HeaderSyncGapProvider; use reth_prune_types::PruneModes; use std::sync::Arc; +use tokio::sync::watch; /// A set containing all stages to run a fully syncing instance of reth. 
/// @@ -88,7 +90,7 @@ impl DefaultStages { #[allow(clippy::too_many_arguments)] pub fn new( provider: Provider, - header_mode: HeaderSyncMode, + tip: watch::Receiver, consensus: Arc, header_downloader: H, body_downloader: B, @@ -102,7 +104,7 @@ impl DefaultStages { Self { online: OnlineStages::new( provider, - header_mode, + tip, consensus, header_downloader, body_downloader, @@ -159,8 +161,8 @@ where pub struct OnlineStages { /// Sync gap provider for the headers stage. provider: Provider, - /// The sync mode for the headers stage. - header_mode: HeaderSyncMode, + /// The tip for the headers stage. + tip: watch::Receiver, /// The consensus engine used to validate incoming data. consensus: Arc, /// The block header downloader @@ -175,13 +177,13 @@ impl OnlineStages { /// Create a new set of online stages with default values. pub fn new( provider: Provider, - header_mode: HeaderSyncMode, + tip: watch::Receiver, consensus: Arc, header_downloader: H, body_downloader: B, stages_config: StageConfig, ) -> Self { - Self { provider, header_mode, consensus, header_downloader, body_downloader, stages_config } + Self { provider, tip, consensus, header_downloader, body_downloader, stages_config } } } @@ -203,7 +205,7 @@ where pub fn builder_with_bodies( bodies: BodyStage, provider: Provider, - mode: HeaderSyncMode, + tip: watch::Receiver, header_downloader: H, consensus: Arc, stages_config: StageConfig, @@ -212,7 +214,7 @@ where .add_stage(HeaderStage::new( provider, header_downloader, - mode, + tip, consensus.clone(), stages_config.etl, )) @@ -232,7 +234,7 @@ where .add_stage(HeaderStage::new( self.provider, self.header_downloader, - self.header_mode, + self.tip, self.consensus.clone(), self.stages_config.etl.clone(), )) diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index a1c7c8c9efad..72ea340e9d22 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -741,7 +741,7 @@ mod 
tests { let transaction = random_signed_tx(&mut rng); static_file_producer .append_transaction(tx_num, transaction.into()) - .map(|_| ()) + .map(drop) })?; if body.tx_count != 0 { diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 0e97fc9e07cd..fd54ce931d5f 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -4,20 +4,21 @@ use reth_config::config::ExecutionConfig; use reth_db::{static_file::HeaderMask, tables}; use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; +use reth_execution_types::{Chain, ExecutionOutcome}; use reth_exex::{ExExManagerHandle, ExExNotification}; use reth_primitives::{BlockNumber, Header, StaticFileSegment}; +use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - BlockReader, Chain, DatabaseProviderRW, ExecutionOutcome, HeaderProvider, - LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter, StatsReader, - TransactionVariant, + BlockReader, DatabaseProviderRW, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, + ProviderError, StateWriter, StatsReader, TransactionVariant, }; use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, - ExecutionCheckpoint, MetricEvent, MetricEventsSender, Stage, StageCheckpoint, StageError, - StageId, UnwindInput, UnwindOutput, + ExecutionCheckpoint, ExecutionStageThresholds, MetricEvent, MetricEventsSender, Stage, + StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; use std::{ cmp::Ordering, @@ -106,7 +107,7 @@ impl ExecutionStage { /// Create an execution stage with the provided executor. 
/// - /// The commit threshold will be set to `10_000`. + /// The commit threshold will be set to [`MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD`]. pub fn new_with_executor(executor_provider: E) -> Self { Self::new( executor_provider, @@ -221,8 +222,9 @@ where provider.tx_ref(), provider.static_file_provider().clone(), )); - let mut executor = self.executor_provider.batch_executor(db, prune_modes); + let mut executor = self.executor_provider.batch_executor(db); executor.set_tip(max_block); + executor.set_prune_modes(prune_modes); // Progress tracking let mut stage_progress = start_block; @@ -231,6 +233,13 @@ where let mut fetch_block_duration = Duration::default(); let mut execution_duration = Duration::default(); + + let mut last_block = start_block; + let mut last_execution_duration = Duration::default(); + let mut last_cumulative_gas = 0; + let mut last_log_instant = Instant::now(); + let log_duration = Duration::from_secs(10); + debug!(target: "sync::stages::execution", start = start_block, end = max_block, "Executing range"); // Execute block range @@ -269,6 +278,22 @@ where })?; execution_duration += execute_start.elapsed(); + // Log execution throughput + if last_log_instant.elapsed() >= log_duration { + info!( + target: "sync::stages::execution", + start = last_block, + end = block_number, + throughput = format_gas_throughput(cumulative_gas - last_cumulative_gas, execution_duration - last_execution_duration), + "Executed block range" + ); + + last_block = block_number + 1; + last_execution_duration = execution_duration; + last_cumulative_gas = cumulative_gas; + last_log_instant = Instant::now(); + } + // Gas metrics if let Some(metrics_tx) = &mut self.metrics_tx { let _ = @@ -390,7 +415,7 @@ where // Prepare the input for post unwind commit hook, where an `ExExNotification` will be sent. if self.exex_manager_handle.has_exexs() { // Get the blocks for the unwound range. 
- let blocks = provider.get_take_block_range::(range.clone())?; + let blocks = provider.sealed_block_with_senders_range(range.clone())?; let previous_input = self.post_unwind_commit_input.replace(Chain::new( blocks, bundle_state_with_receipts, @@ -543,83 +568,6 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -/// The thresholds at which the execution stage writes state changes to the database. -/// -/// If either of the thresholds (`max_blocks` and `max_changes`) are hit, then the execution stage -/// commits all pending changes to the database. -/// -/// A third threshold, `max_changesets`, can be set to periodically write changesets to the -/// current database transaction, which frees up memory. -#[derive(Debug, Clone)] -pub struct ExecutionStageThresholds { - /// The maximum number of blocks to execute before the execution stage commits. - pub max_blocks: Option, - /// The maximum number of state changes to keep in memory before the execution stage commits. - pub max_changes: Option, - /// The maximum cumulative amount of gas to process before the execution stage commits. - pub max_cumulative_gas: Option, - /// The maximum spent on blocks processing before the execution stage commits. - pub max_duration: Option, -} - -impl Default for ExecutionStageThresholds { - fn default() -> Self { - Self { - max_blocks: Some(500_000), - max_changes: Some(5_000_000), - //WVM: 50k full blocks of 300M gas - max_cumulative_gas: Some(300_000_000 * 50_000), - // 10 minutes - max_duration: Some(Duration::from_secs(10 * 60)), - } - } -} - -impl ExecutionStageThresholds { - /// Check if the batch thresholds have been hit. 
- #[inline] - pub fn is_end_of_batch( - &self, - blocks_processed: u64, - changes_processed: u64, - cumulative_gas_used: u64, - elapsed: Duration, - ) -> bool { - blocks_processed >= self.max_blocks.unwrap_or(u64::MAX) || - changes_processed >= self.max_changes.unwrap_or(u64::MAX) || - cumulative_gas_used >= self.max_cumulative_gas.unwrap_or(u64::MAX) || - elapsed >= self.max_duration.unwrap_or(Duration::MAX) - } -} - -impl From for ExecutionStageThresholds { - fn from(config: ExecutionConfig) -> Self { - Self { - max_blocks: config.max_blocks, - max_changes: config.max_changes, - max_cumulative_gas: config.max_cumulative_gas, - max_duration: config.max_duration, - } - } -} - -/// Returns a formatted gas throughput log, showing either: -/// * "Kgas/s", or 1,000 gas per second -/// * "Mgas/s", or 1,000,000 gas per second -/// * "Ggas/s", or 1,000,000,000 gas per second -/// -/// Depending on the magnitude of the gas throughput. -pub fn format_gas_throughput(gas: u64, execution_duration: Duration) -> String { - let gas_per_second = gas as f64 / execution_duration.as_secs_f64(); - if gas_per_second < 1_000_000.0 { - format!("{:.} Kgas/second", gas_per_second / 1_000.0) - } else if gas_per_second < 1_000_000_000.0 { - format!("{:.} Mgas/second", gas_per_second / 1_000_000.0) - } else { - format!("{:.} Ggas/second", gas_per_second / 1_000_000_000.0) - } -} - /// Returns a `StaticFileProviderRWRefMut` static file producer after performing a consistency /// check. 
/// @@ -704,12 +652,13 @@ mod tests { use crate::test_utils::TestStageDB; use alloy_rlp::Decodable; use assert_matches::assert_matches; + use reth_chainspec::ChainSpecBuilder; use reth_db_api::{models::AccountBeforeTx, transaction::DbTxMut}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_execution_errors::BlockValidationError; use reth_primitives::{ - address, hex_literal::hex, keccak256, Account, Address, Bytecode, ChainSpecBuilder, - SealedBlock, StorageEntry, B256, U256, + address, hex_literal::hex, keccak256, Account, Address, Bytecode, SealedBlock, + StorageEntry, B256, U256, }; use reth_provider::{ test_utils::create_test_provider_factory, AccountReader, ReceiptProvider, @@ -737,22 +686,6 @@ mod tests { ) } - #[test] - fn test_gas_throughput_fmt() { - let duration = Duration::from_secs(1); - let gas = 100_000; - let throughput = format_gas_throughput(gas, duration); - assert_eq!(throughput, "100 Kgas/second"); - - let gas = 100_000_000; - let throughput = format_gas_throughput(gas, duration); - assert_eq!(throughput, "100 Mgas/second"); - - let gas = 100_000_000_000; - let throughput = format_gas_throughput(gas, duration); - assert_eq!(throughput, "100 Ggas/second"); - } - #[test] fn execution_checkpoint_matches() { let factory = create_test_provider_factory(); @@ -791,12 +724,9 @@ mod tests { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError) .unwrap(), - None, ) .unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -836,10 +766,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -879,10 +807,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -916,10 +842,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -1067,10 +991,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -1186,10 +1108,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 978c0f716f48..f547b86245ab 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ 
b/crates/stages/stages/src/stages/hashing_account.rs @@ -75,7 +75,7 @@ impl AccountHashingStage { let blocks = random_block_range(&mut rng, opts.blocks.clone(), B256::ZERO, opts.txs); for block in blocks { - provider.insert_historical_block(block.try_seal_with_senders().unwrap(), None).unwrap(); + provider.insert_historical_block(block.try_seal_with_senders().unwrap()).unwrap(); } provider .static_file_provider() diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 90bed7e49a77..9bf8c944518f 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -10,11 +10,10 @@ use reth_db_api::{ }; use reth_etl::Collector; use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}; -use reth_primitives::{BlockHash, BlockNumber, SealedHeader, StaticFileSegment}; +use reth_primitives::{BlockHash, BlockNumber, SealedHeader, StaticFileSegment, B256}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, DatabaseProviderRW, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, - HeaderSyncMode, }; use reth_stages_api::{ BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, @@ -25,6 +24,7 @@ use std::{ sync::Arc, task::{ready, Context, Poll}, }; +use tokio::sync::watch; use tracing::*; /// The headers stage. @@ -44,8 +44,8 @@ pub struct HeaderStage { provider: Provider, /// Strategy for downloading the headers downloader: Downloader, - /// The sync mode for the stage. - mode: HeaderSyncMode, + /// The tip for the stage. + tip: watch::Receiver, /// Consensus client implementation consensus: Arc, /// Current sync gap. 
@@ -68,14 +68,14 @@ where pub fn new( database: Provider, downloader: Downloader, - mode: HeaderSyncMode, + tip: watch::Receiver, consensus: Arc, etl_config: EtlConfig, ) -> Self { Self { provider: database, downloader, - mode, + tip, consensus, sync_gap: None, hash_collector: Collector::new(etl_config.file_size / 2, etl_config.dir.clone()), @@ -206,7 +206,7 @@ where } // Lookup the head and tip of the sync range - let gap = self.provider.sync_gap(self.mode.clone(), current_checkpoint.block_number)?; + let gap = self.provider.sync_gap(self.tip.clone(), current_checkpoint.block_number)?; let tip = gap.target.tip(); self.sync_gap = Some(gap.clone()); @@ -376,10 +376,9 @@ mod tests { stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; use assert_matches::assert_matches; + use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockBody, SealedBlock, SealedBlockWithSenders, B256}; - use reth_provider::{ - BlockWriter, ExecutionOutcome, ProviderFactory, StaticFileProviderFactory, - }; + use reth_provider::{BlockWriter, ProviderFactory, StaticFileProviderFactory}; use reth_stages_api::StageUnitCheckpoint; use reth_testing_utils::generators::{self, random_header, random_header_range}; use reth_trie::{updates::TrieUpdates, HashedPostState}; @@ -436,7 +435,7 @@ mod tests { HeaderStage::new( self.db.factory.clone(), (*self.downloader_factory)(), - HeaderSyncMode::Tip(self.channel.1.clone()), + self.channel.1.clone(), self.consensus.clone(), EtlConfig::default(), ) @@ -632,7 +631,6 @@ mod tests { ExecutionOutcome::default(), HashedPostState::default(), TrieUpdates::default(), - None, ) .unwrap(); provider.commit().unwrap(); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index c1abd0d243a8..d7b044497d61 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -23,13 +23,13 @@ use tracing::*; /// they should include in a bug report, 
since true state root errors can be impossible to debug /// with just basic logs. pub const INVALID_STATE_ROOT_ERROR_MESSAGE: &str = r#" -Invalid state root error on new payload! +Invalid state root error on stage verification! This is an error that likely requires a report to the reth team with additional information. Please include the following information in your report: * This error message * The state root of the block that was rejected * The output of `reth db stats --checksum` from the database that was being used. This will take a long time to run! - * 50-100 lines of logs before and after the first occurrence of this log message. Please search your log output for the first observed occurrence of MAGIC_STATE_ROOT. + * 50-100 lines of logs before and after the first occurrence of the log message with the state root of the block that was rejected. * The debug logs from __the same time period__. To find the default location for these logs, run: `reth --help | grep -A 4 'log.file.directory'` @@ -217,7 +217,7 @@ impl Stage for MerkleStage { })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { - updates.flush(tx)?; + updates.write_to_database(tx)?; let checkpoint = MerkleCheckpoint::new( to_block, @@ -237,7 +237,7 @@ impl Stage for MerkleStage { }); } StateRootProgress::Complete(root, hashed_entries_walked, updates) => { - updates.flush(tx)?; + updates.write_to_database(tx)?; entities_checkpoint.processed += hashed_entries_walked as u64; @@ -252,7 +252,7 @@ impl Stage for MerkleStage { error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); StageError::Fatal(Box::new(e)) })?; - updates.flush(provider.tx_ref())?; + updates.write_to_database(provider.tx_ref())?; let total_hashed_entries = (provider.count_entries::()? + provider.count_entries::()?) 
@@ -325,7 +325,7 @@ impl Stage for MerkleStage { validate_state_root(block_root, target.seal_slow(), input.unwind_to)?; // Validation passed, apply unwind changes to the database. - updates.flush(provider.tx_ref())?; + updates.write_to_database(provider.tx_ref())?; // TODO(alexey): update entities checkpoint } else { diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 1f7ae1e9b6a0..4b65523bad68 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -42,6 +42,7 @@ mod tests { use super::*; use crate::test_utils::{StorageKind, TestStageDB}; use alloy_rlp::Decodable; + use reth_chainspec::ChainSpecBuilder; use reth_db::{ mdbx::{cursor::Cursor, RW}, tables, @@ -56,8 +57,8 @@ mod tests { use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex::ExExManagerHandle; use reth_primitives::{ - address, hex_literal::hex, keccak256, Account, BlockNumber, Bytecode, ChainSpecBuilder, - SealedBlock, StaticFileSegment, B256, U256, + address, hex_literal::hex, keccak256, Account, BlockNumber, Bytecode, SealedBlock, + StaticFileSegment, B256, U256, }; use reth_provider::{ providers::StaticFileWriter, AccountExtReader, BlockReader, DatabaseProviderFactory, @@ -65,7 +66,9 @@ mod tests { StaticFileProviderFactory, StorageReader, }; use reth_prune_types::{PruneMode, PruneModes}; - use reth_stages_api::{ExecInput, PipelineTarget, Stage, StageCheckpoint, StageId}; + use reth_stages_api::{ + ExecInput, ExecutionStageThresholds, PipelineTarget, Stage, StageCheckpoint, StageId, + }; use reth_testing_utils::generators::{self, random_block, random_block_range, random_receipt}; use std::{io::Write, sync::Arc}; @@ -81,11 +84,9 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); + provider_rw.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); provider_rw - .insert_historical_block(genesis.try_seal_with_senders().unwrap(), None) - .unwrap(); - provider_rw - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) + .insert_historical_block(block.clone().try_seal_with_senders().unwrap()) .unwrap(); // Fill with bogus blocks to respect PruneMode distance. 
@@ -94,9 +95,7 @@ mod tests { for block_number in 2..=tip { let nblock = random_block(&mut rng, block_number, Some(head), Some(0), Some(0)); head = nblock.hash(); - provider_rw - .insert_historical_block(nblock.try_seal_with_senders().unwrap(), None) - .unwrap(); + provider_rw.insert_historical_block(nblock.try_seal_with_senders().unwrap()).unwrap(); } provider_rw .static_file_provider() diff --git a/crates/stages/stages/src/test_utils/set.rs b/crates/stages/stages/src/test_utils/set.rs index df167f5c80e5..d17695168e2c 100644 --- a/crates/stages/stages/src/test_utils/set.rs +++ b/crates/stages/stages/src/test_utils/set.rs @@ -11,7 +11,7 @@ pub struct TestStages { } impl TestStages { - pub fn new( + pub const fn new( exec_outputs: VecDeque>, unwind_outputs: VecDeque>, ) -> Self { diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 45604fa6094d..f149720a83ef 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,3 +1,4 @@ +use reth_chainspec::MAINNET; use reth_db::{ tables, test_utils::{ @@ -16,7 +17,7 @@ use reth_db_api::{ }; use reth_primitives::{ keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, - StaticFileSegment, StorageEntry, TxHash, TxNumber, B256, MAINNET, U256, + StaticFileSegment, StorageEntry, TxHash, TxNumber, B256, U256, }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, @@ -349,9 +350,7 @@ impl TestStageDB { let mut writer = provider.latest_writer(StaticFileSegment::Receipts)?; let res = receipts.into_iter().try_for_each(|(block_num, receipts)| { writer.increment_block(StaticFileSegment::Receipts, block_num)?; - for (tx_num, receipt) in receipts { - writer.append_receipt(tx_num, receipt)?; - } + writer.append_receipts(receipts.into_iter().map(Ok))?; Ok(()) }); writer.commit_without_sync_all()?; diff --git a/crates/stages/types/Cargo.toml 
b/crates/stages/types/Cargo.toml index ab64a89c9036..76bb9f4292c2 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] reth-codecs.workspace = true -reth-trie-types.workspace = true +reth-trie-common.workspace = true alloy-primitives.workspace = true modular-bitfield.workspace = true @@ -24,5 +24,6 @@ serde.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-derive.workspace = true +proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true -rand.workspace = true \ No newline at end of file +rand.workspace = true diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 34059f7c4a10..ee830015486e 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -1,7 +1,7 @@ use alloy_primitives::{Address, BlockNumber, B256}; use bytes::Buf; use reth_codecs::{main_codec, Compact}; -use reth_trie_types::{hash_builder::HashBuilderState, StoredSubNode}; +use reth_trie_common::{hash_builder::HashBuilderState, StoredSubNode}; use std::ops::RangeInclusive; use super::StageId; @@ -21,7 +21,7 @@ pub struct MerkleCheckpoint { impl MerkleCheckpoint { /// Creates a new Merkle checkpoint. - pub fn new( + pub const fn new( target_block: BlockNumber, last_account_key: B256, walker_stack: Vec, diff --git a/crates/stages/types/src/execution.rs b/crates/stages/types/src/execution.rs new file mode 100644 index 000000000000..61f7313a380a --- /dev/null +++ b/crates/stages/types/src/execution.rs @@ -0,0 +1,50 @@ +use std::time::Duration; + +/// The thresholds at which the execution stage writes state changes to the database. +/// +/// If either of the thresholds (`max_blocks` and `max_changes`) are hit, then the execution stage +/// commits all pending changes to the database. 
+/// +/// A third threshold, `max_changesets`, can be set to periodically write changesets to the +/// current database transaction, which frees up memory. +#[derive(Debug, Clone)] +pub struct ExecutionStageThresholds { + /// The maximum number of blocks to execute before the execution stage commits. + pub max_blocks: Option, + /// The maximum number of state changes to keep in memory before the execution stage commits. + pub max_changes: Option, + /// The maximum cumulative amount of gas to process before the execution stage commits. + pub max_cumulative_gas: Option, + /// The maximum spent on blocks processing before the execution stage commits. + pub max_duration: Option, +} + +impl Default for ExecutionStageThresholds { + fn default() -> Self { + Self { + max_blocks: Some(500_000), + max_changes: Some(5_000_000), + // 50k full blocks of 30M gas + max_cumulative_gas: Some(30_000_000 * 50_000), + // 10 minutes + max_duration: Some(Duration::from_secs(10 * 60)), + } + } +} + +impl ExecutionStageThresholds { + /// Check if the batch thresholds have been hit. 
+ #[inline] + pub fn is_end_of_batch( + &self, + blocks_processed: u64, + changes_processed: u64, + cumulative_gas_used: u64, + elapsed: Duration, + ) -> bool { + blocks_processed >= self.max_blocks.unwrap_or(u64::MAX) || + changes_processed >= self.max_changes.unwrap_or(u64::MAX) || + cumulative_gas_used >= self.max_cumulative_gas.unwrap_or(u64::MAX) || + elapsed >= self.max_duration.unwrap_or(Duration::MAX) + } +} diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 93106bd886d1..0132c8b410d8 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod id; @@ -21,6 +19,9 @@ pub use checkpoints::{ StageUnitCheckpoint, StorageHashingCheckpoint, }; +mod execution; +pub use execution::*; + /// Direction and target block for pipeline operations. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum PipelineTarget { diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index 4d4e31509aeb..1a1921d58c5c 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-provider.workspace = true @@ -21,6 +20,10 @@ reth-storage-errors.workspace = true reth-nippy-jar.workspace = true reth-tokio-util.workspace = true reth-prune-types.workspace = true +reth-static-file-types.workspace = true +reth-stages-types.workspace = true + +alloy-primitives.workspace = true # misc tracing.workspace = true diff --git a/crates/static-file/static-file/src/lib.rs b/crates/static-file/static-file/src/lib.rs index f545298ebd47..1bfe4134e954 100644 --- a/crates/static-file/static-file/src/lib.rs +++ b/crates/static-file/static-file/src/lib.rs @@ -16,3 +16,6 @@ pub use static_file_producer::{ StaticFileProducer, StaticFileProducerInner, StaticFileProducerResult, StaticFileProducerWithResult, StaticFileTargets, }; + +// Re-export for convenience. 
+pub use reth_static_file_types::*; diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 5fb1a4422e82..5824d1d1ac7d 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -1,13 +1,14 @@ -use crate::segments::{dataset_for_compression, prepare_jar, Segment, SegmentHeader}; -use reth_db::{static_file::create_static_file_T1_T2_T3, tables, RawKey, RawTable}; +use crate::segments::Segment; +use alloy_primitives::BlockNumber; +use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; -use reth_primitives::{static_file::SegmentConfig, BlockNumber, StaticFileSegment}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, DatabaseProviderRO, }; +use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; -use std::{ops::RangeInclusive, path::Path}; +use std::ops::RangeInclusive; /// Static File segment responsible for [`StaticFileSegment::Headers`] part of data. 
#[derive(Debug, Default)] @@ -55,73 +56,4 @@ impl Segment for Headers { Ok(()) } - - fn create_static_file_file( - &self, - provider: &DatabaseProviderRO, - directory: &Path, - config: SegmentConfig, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let range_len = block_range.clone().count(); - let jar = prepare_jar::( - provider, - directory, - StaticFileSegment::Headers, - config, - block_range.clone(), - range_len, - || { - Ok([ - dataset_for_compression::( - provider, - &block_range, - range_len, - )?, - dataset_for_compression::( - provider, - &block_range, - range_len, - )?, - dataset_for_compression::( - provider, - &block_range, - range_len, - )?, - ]) - }, - )?; - - // Generate list of hashes for filters & PHF - let mut cursor = provider.tx_ref().cursor_read::>()?; - let hashes = if config.filters.has_filters() { - Some( - cursor - .walk(Some(RawKey::from(*block_range.start())))? - .take(range_len) - .map(|row| row.map(|(_key, value)| value.into_value()).map_err(|e| e.into())), - ) - } else { - None - }; - - create_static_file_T1_T2_T3::< - tables::Headers, - tables::HeaderTerminalDifficulties, - tables::CanonicalHeaders, - BlockNumber, - SegmentHeader, - >( - provider.tx_ref(), - block_range, - None, - // We already prepared the dictionary beforehand - None::>>>, - hashes, - range_len, - jar, - )?; - - Ok(()) - } } diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index e21f8ad7a12b..1125b2085d99 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -9,23 +9,12 @@ pub use headers::Headers; mod receipts; pub use receipts::Receipts; -use reth_db::{RawKey, RawTable}; -use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx}; -use reth_nippy_jar::NippyJar; -use reth_primitives::{ - static_file::{ - find_fixed_range, Compression, Filters, InclusionFilter, PerfectHashingFunction, - 
SegmentConfig, SegmentHeader, - }, - BlockNumber, StaticFileSegment, -}; -use reth_provider::{ - providers::StaticFileProvider, DatabaseProviderRO, ProviderError, TransactionsProviderExt, -}; +use alloy_primitives::BlockNumber; +use reth_db_api::database::Database; +use reth_provider::{providers::StaticFileProvider, DatabaseProviderRO}; +use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; -use std::{ops::RangeInclusive, path::Path}; - -pub(crate) type Rows = [Vec>; COLUMNS]; +use std::ops::RangeInclusive; /// A segment represents moving some portion of the data to static files. pub trait Segment: Send + Sync { @@ -40,80 +29,4 @@ pub trait Segment: Send + Sync { static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()>; - - /// Create a static file of data for the provided block range. The `directory` parameter - /// determines the static file's save location. - fn create_static_file_file( - &self, - provider: &DatabaseProviderRO, - directory: &Path, - config: SegmentConfig, - block_range: RangeInclusive, - ) -> ProviderResult<()>; -} - -/// Returns a [`NippyJar`] according to the desired configuration. The `directory` parameter -/// determines the static file's save location. 
-pub(crate) fn prepare_jar( - provider: &DatabaseProviderRO, - directory: impl AsRef, - segment: StaticFileSegment, - segment_config: SegmentConfig, - block_range: RangeInclusive, - total_rows: usize, - prepare_compression: impl Fn() -> ProviderResult>, -) -> ProviderResult> { - let tx_range = match segment { - StaticFileSegment::Headers => None, - StaticFileSegment::Receipts | StaticFileSegment::Transactions => { - Some(provider.transaction_range_by_block_range(block_range.clone())?.into()) - } - }; - - let mut nippy_jar = NippyJar::new( - COLUMNS, - &directory.as_ref().join(segment.filename(&find_fixed_range(*block_range.end())).as_str()), - SegmentHeader::new(block_range.clone().into(), Some(block_range.into()), tx_range, segment), - ); - - nippy_jar = match segment_config.compression { - Compression::Lz4 => nippy_jar.with_lz4(), - Compression::Zstd => nippy_jar.with_zstd(false, 0), - Compression::ZstdWithDictionary => { - let dataset = prepare_compression()?; - - nippy_jar = nippy_jar.with_zstd(true, 5_000_000); - nippy_jar - .prepare_compression(dataset.to_vec()) - .map_err(|e| ProviderError::NippyJar(e.to_string()))?; - nippy_jar - } - Compression::Uncompressed => nippy_jar, - }; - - if let Filters::WithFilters(inclusion_filter, phf) = segment_config.filters { - nippy_jar = match inclusion_filter { - InclusionFilter::Cuckoo => nippy_jar.with_cuckoo_filter(total_rows), - }; - nippy_jar = match phf { - PerfectHashingFunction::Fmph => nippy_jar.with_fmph(), - PerfectHashingFunction::GoFmph => nippy_jar.with_gofmph(), - }; - } - - Ok(nippy_jar) -} - -/// Generates the dataset to train a zstd dictionary with the most recent rows (at most 1000). -pub(crate) fn dataset_for_compression>( - provider: &DatabaseProviderRO, - range: &RangeInclusive, - range_len: usize, -) -> ProviderResult>> { - let mut cursor = provider.tx_ref().cursor_read::>()?; - Ok(cursor - .walk_back(Some(RawKey::from(*range.end())))? 
- .take(range_len.min(1000)) - .map(|row| row.map(|(_key, value)| value.into_value()).expect("should exist")) - .collect::>()) } diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs index 06102a7d8a3f..5548e9f99ddf 100644 --- a/crates/static-file/static-file/src/segments/receipts.rs +++ b/crates/static-file/static-file/src/segments/receipts.rs @@ -1,16 +1,14 @@ -use crate::segments::{dataset_for_compression, prepare_jar, Segment}; -use reth_db::{static_file::create_static_file_T1, tables}; +use crate::segments::Segment; +use alloy_primitives::BlockNumber; +use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; -use reth_primitives::{ - static_file::{SegmentConfig, SegmentHeader}, - BlockNumber, StaticFileSegment, TxNumber, -}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DatabaseProviderRO, TransactionsProviderExt, + BlockReader, DatabaseProviderRO, }; +use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::{ops::RangeInclusive, path::Path}; +use std::ops::RangeInclusive; /// Static File segment responsible for [`StaticFileSegment::Receipts`] part of data. 
#[derive(Debug, Default)] @@ -42,65 +40,11 @@ impl Segment for Receipts { let mut receipts_cursor = provider.tx_ref().cursor_read::()?; let receipts_walker = receipts_cursor.walk_range(block_body_indices.tx_num_range())?; - for entry in receipts_walker { - let (tx_number, receipt) = entry?; - - static_file_writer.append_receipt(tx_number, receipt)?; - } + static_file_writer.append_receipts( + receipts_walker.map(|result| result.map_err(ProviderError::from)), + )?; } Ok(()) } - - fn create_static_file_file( - &self, - provider: &DatabaseProviderRO, - directory: &Path, - config: SegmentConfig, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; - let tx_range_len = tx_range.clone().count(); - - let jar = prepare_jar::( - provider, - directory, - StaticFileSegment::Receipts, - config, - block_range, - tx_range_len, - || { - Ok([dataset_for_compression::( - provider, - &tx_range, - tx_range_len, - )?]) - }, - )?; - - // Generate list of hashes for filters & PHF - let hashes = if config.filters.has_filters() { - Some( - provider - .transaction_hashes_by_range(*tx_range.start()..(*tx_range.end() + 1))? 
- .into_iter() - .map(|(tx, _)| Ok(tx)), - ) - } else { - None - }; - - create_static_file_T1::( - provider.tx_ref(), - tx_range, - None, - // We already prepared the dictionary beforehand - None::>>>, - hashes, - tx_range_len, - jar, - )?; - - Ok(()) - } } diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 47eaa727250c..4361f8ca661e 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -1,16 +1,14 @@ -use crate::segments::{dataset_for_compression, prepare_jar, Segment}; -use reth_db::{static_file::create_static_file_T1, tables}; +use crate::segments::Segment; +use alloy_primitives::BlockNumber; +use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; -use reth_primitives::{ - static_file::{SegmentConfig, SegmentHeader}, - BlockNumber, StaticFileSegment, TxNumber, -}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DatabaseProviderRO, TransactionsProviderExt, + BlockReader, DatabaseProviderRO, }; +use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::{ops::RangeInclusive, path::Path}; +use std::ops::RangeInclusive; /// Static File segment responsible for [`StaticFileSegment::Transactions`] part of data. 
#[derive(Debug, Default)] @@ -55,56 +53,4 @@ impl Segment for Transactions { Ok(()) } - - fn create_static_file_file( - &self, - provider: &DatabaseProviderRO, - directory: &Path, - config: SegmentConfig, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; - let tx_range_len = tx_range.clone().count(); - - let jar = prepare_jar::( - provider, - directory, - StaticFileSegment::Transactions, - config, - block_range, - tx_range_len, - || { - Ok([dataset_for_compression::( - provider, - &tx_range, - tx_range_len, - )?]) - }, - )?; - - // Generate list of hashes for filters & PHF - let hashes = if config.filters.has_filters() { - Some( - provider - .transaction_hashes_by_range(*tx_range.start()..(*tx_range.end() + 1))? - .into_iter() - .map(|(tx, _)| Ok(tx)), - ) - } else { - None - }; - - create_static_file_T1::( - provider.tx_ref(), - tx_range, - None, - // We already prepared the dictionary beforehand - None::>>>, - hashes, - tx_range_len, - jar, - )?; - - Ok(()) - } } diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 4b00a9eab325..e93d6013e047 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -1,12 +1,17 @@ //! Support for producing static files. 
use crate::{segments, segments::Segment, StaticFileProducerEvent}; +use alloy_primitives::BlockNumber; use parking_lot::Mutex; use rayon::prelude::*; use reth_db_api::database::Database; -use reth_primitives::{static_file::HighestStaticFiles, BlockNumber}; -use reth_provider::{providers::StaticFileWriter, ProviderFactory, StaticFileProviderFactory}; +use reth_provider::{ + providers::StaticFileWriter, ProviderFactory, StageCheckpointReader as _, + StaticFileProviderFactory, +}; use reth_prune_types::PruneModes; +use reth_stages_types::StageId; +use reth_static_file_types::HighestStaticFiles; use reth_storage_errors::provider::ProviderResult; use reth_tokio_util::{EventSender, EventStream}; use std::{ @@ -55,7 +60,7 @@ pub struct StaticFileProducerInner { event_sender: EventSender, } -/// Static File targets, per data part, measured in [`BlockNumber`]. +/// Static File targets, per data segment, measured in [`BlockNumber`]. #[derive(Debug, Clone, Eq, PartialEq)] pub struct StaticFileTargets { headers: Option>, @@ -166,6 +171,28 @@ impl StaticFileProducerInner { Ok(targets) } + /// Copies data from database to static files according to + /// [stage checkpoints](reth_stages_types::StageCheckpoint). + /// + /// Returns highest block numbers for all static file segments. + pub fn copy_to_static_files(&self) -> ProviderResult { + let provider = self.provider_factory.provider()?; + let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] + .into_iter() + .map(|stage| provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number))) + .collect::, _>>()?; + + let highest_static_files = HighestStaticFiles { + headers: stages_checkpoints[0], + receipts: stages_checkpoints[1], + transactions: stages_checkpoints[2], + }; + let targets = self.get_static_file_targets(highest_static_files)?; + self.run(targets)?; + + Ok(highest_static_files) + } + /// Returns a static file targets at the provided finalized block numbers per segment. 
/// The target is determined by the check against highest `static_files` using /// [`reth_provider::providers::StaticFileProvider::get_highest_static_files`]. @@ -228,15 +255,16 @@ mod tests { use crate::static_file_producer::{ StaticFileProducer, StaticFileProducerInner, StaticFileTargets, }; + use alloy_primitives::{B256, U256}; use assert_matches::assert_matches; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_db_api::{database::Database, transaction::DbTx}; - use reth_primitives::{static_file::HighestStaticFiles, StaticFileSegment, B256, U256}; use reth_provider::{ providers::StaticFileWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_static_file_types::{HighestStaticFiles, StaticFileSegment}; use reth_testing_utils::{ generators, generators::{random_block_range, random_receipt}, diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index f78d61f6961b..556ec8f90676 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -20,7 +20,7 @@ pub use segment::{SegmentConfig, SegmentHeader, SegmentRangeInclusive, StaticFil /// Default static file block count. pub const BLOCKS_PER_STATIC_FILE: u64 = 500_000; -/// Highest static file block numbers, per data part. +/// Highest static file block numbers, per data segment. #[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] pub struct HighestStaticFiles { /// Highest static file block of headers, inclusive. @@ -53,6 +53,11 @@ impl HighestStaticFiles { } } + /// Returns the minimum block of all segments. + pub fn min(&self) -> Option { + [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).min() + } + /// Returns the maximum block of all segments. 
pub fn max(&self) -> Option { [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).max() diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index e370233d1c7d..6f286a95b411 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -37,6 +37,7 @@ serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-derive.workspace = true +proptest-arbitrary-interop.workspace = true [features] default = ["std", "alloy"] diff --git a/crates/storage/codecs/derive/src/arbitrary.rs b/crates/storage/codecs/derive/src/arbitrary.rs index 3e36f940527a..60bc9074a91a 100644 --- a/crates/storage/codecs/derive/src/arbitrary.rs +++ b/crates/storage/codecs/derive/src/arbitrary.rs @@ -47,10 +47,10 @@ pub fn maybe_generate_tests(args: TokenStream, ast: &DeriveInput) -> TokenStream #[test] fn malformed_rlp_header_check() { - use rand::RngCore; + use rand::RngCore; // get random instance of type - let mut raw = [0u8;1024]; + let mut raw = [0u8; 1024]; rand::thread_rng().fill_bytes(&mut raw); let mut unstructured = arbitrary::Unstructured::new(&raw[..]); let val = ::arbitrary(&mut unstructured); @@ -72,7 +72,6 @@ pub fn maybe_generate_tests(args: TokenStream, ast: &DeriveInput) -> TokenStream let res = super::#type_ident::decode(&mut b.as_ref()); assert!(res.is_err(), "malformed header was decoded"); } - }); } else if let Ok(num) = arg.to_string().parse() { default_cases = num; @@ -88,12 +87,13 @@ pub fn maybe_generate_tests(args: TokenStream, ast: &DeriveInput) -> TokenStream #[cfg(test)] mod #mod_tests { #(#traits)* + use proptest_arbitrary_interop::arb; #[test] fn proptest() { let mut config = proptest::prelude::ProptestConfig::with_cases(#default_cases as u32); - proptest::proptest!(config, |(field: super::#type_ident)| { + proptest::proptest!(config, |(field in arb::())| { #(#roundtrips)* }); } diff --git 
a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 7c668a6cb4aa..e0022edc4cdb 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -69,7 +69,7 @@ pub fn main_codec(args: TokenStream, input: TokenStream) -> TokenStream { derive_arbitrary(TokenStream::from_iter(args), compact) } -/// Adds `Arbitrary` and `proptest::Arbitrary` imports into scope and derives the struct/enum. +/// Adds `Arbitrary` imports into scope and derives the struct/enum. /// /// If `compact` or `rlp` is passed to `derive_arbitrary`, there will be proptest roundtrip tests /// generated. An integer value passed will limit the number of proptest cases generated (default: @@ -89,17 +89,13 @@ pub fn derive_arbitrary(args: TokenStream, input: TokenStream) -> TokenStream { let tests = arbitrary::maybe_generate_tests(args, &ast); // Avoid duplicate names - let prop_import = format_ident!("{}PropTestArbitrary", ast.ident); let arb_import = format_ident!("{}Arbitrary", ast.ident); quote! 
{ - #[cfg(any(test, feature = "arbitrary"))] - use proptest_derive::Arbitrary as #prop_import; - #[cfg(any(test, feature = "arbitrary"))] use arbitrary::Arbitrary as #arb_import; - #[cfg_attr(any(test, feature = "arbitrary"), derive(#prop_import, #arb_import))] + #[cfg_attr(any(test, feature = "arbitrary"), derive(#arb_import))] #ast #tests diff --git a/crates/storage/codecs/src/alloy/access_list.rs b/crates/storage/codecs/src/alloy/access_list.rs index d3f906318848..f5564e81601a 100644 --- a/crates/storage/codecs/src/alloy/access_list.rs +++ b/crates/storage/codecs/src/alloy/access_list.rs @@ -8,12 +8,11 @@ impl Compact for AccessListItem { where B: bytes::BufMut + AsMut<[u8]>, { - let mut buffer = bytes::BytesMut::new(); + let mut buffer = Vec::new(); self.address.to_compact(&mut buffer); self.storage_keys.specialized_to_compact(&mut buffer); - let total_length = buffer.len(); - buf.put(buffer); - total_length + buf.put(&buffer[..]); + buffer.len() } fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { @@ -31,11 +30,10 @@ impl Compact for AccessList { where B: bytes::BufMut + AsMut<[u8]>, { - let mut buffer = bytes::BytesMut::new(); + let mut buffer = Vec::new(); self.0.to_compact(&mut buffer); - let total_length = buffer.len(); - buf.put(buffer); - total_length + buf.put(&buffer[..]); + buffer.len() } fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { diff --git a/crates/storage/codecs/src/alloy/log.rs b/crates/storage/codecs/src/alloy/log.rs index 8d5c30e0a0b3..eadcb894f3f1 100644 --- a/crates/storage/codecs/src/alloy/log.rs +++ b/crates/storage/codecs/src/alloy/log.rs @@ -10,13 +10,12 @@ impl Compact for LogData { where B: BufMut + AsMut<[u8]>, { - let mut buffer = bytes::BytesMut::new(); + let mut buffer = Vec::new(); let (topics, data) = self.split(); topics.specialized_to_compact(&mut buffer); data.to_compact(&mut buffer); - let total_length = buffer.len(); - buf.put(buffer); - total_length + buf.put(&buffer[..]); + buffer.len() } fn 
from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { @@ -33,12 +32,11 @@ impl Compact for Log { where B: BufMut + AsMut<[u8]>, { - let mut buffer = bytes::BytesMut::new(); + let mut buffer = Vec::new(); self.address.to_compact(&mut buffer); self.data.to_compact(&mut buffer); - let total_length = buffer.len(); - buf.put(buffer); - total_length + buf.put(&buffer[..]); + buffer.len() } fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { diff --git a/crates/storage/codecs/src/alloy/request.rs b/crates/storage/codecs/src/alloy/request.rs index c732e30b2bba..388f2a2b5291 100644 --- a/crates/storage/codecs/src/alloy/request.rs +++ b/crates/storage/codecs/src/alloy/request.rs @@ -26,10 +26,11 @@ impl Compact for Request { mod tests { use super::*; use proptest::proptest; + use proptest_arbitrary_interop::arb; proptest! { #[test] - fn roundtrip(request: Request) { + fn roundtrip(request in arb::()) { let mut buf = Vec::::new(); request.to_compact(&mut buf); let (decoded, _) = Request::from_compact(&buf, buf.len()); diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 80fd549c0285..9e80bc1cf15d 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -14,8 +14,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index fbbf0ca5d7be..7286e03f2da4 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -15,10 +15,11 @@ workspace = true # reth reth-codecs.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-prune-types.workspace = true 
reth-storage-errors.workspace = true reth-stages-types.workspace = true -reth-trie-types.workspace = true +reth-trie-common.workspace = true # codecs modular-bitfield.workspace = true @@ -35,7 +36,6 @@ bytes.workspace = true # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } [dev-dependencies] # reth libs with arbitrary @@ -53,10 +53,11 @@ pprof = { workspace = true, features = [ "criterion", ] } criterion.workspace = true -iai-callgrind = "0.10.2" +iai-callgrind.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +proptest-arbitrary-interop.workspace = true proptest-derive.workspace = true paste.workspace = true @@ -69,6 +70,5 @@ arbitrary = [ "reth-primitives/arbitrary", "dep:arbitrary", "dep:proptest", - "dep:proptest-derive", ] -optimism = [] +optimism = ["reth-primitives/optimism"] diff --git a/crates/storage/db-api/src/lib.rs b/crates/storage/db-api/src/lib.rs index 284321092320..cd25b3c65fa0 100644 --- a/crates/storage/db-api/src/lib.rs +++ b/crates/storage/db-api/src/lib.rs @@ -58,8 +58,6 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] /// Common types used throughout the abstraction. 
pub mod common; @@ -81,3 +79,5 @@ pub mod models; mod scale; mod utils; + +pub use database::Database; diff --git a/crates/storage/db-api/src/models/integer_list.rs b/crates/storage/db-api/src/models/integer_list.rs index e419a9435129..f47605bf88b5 100644 --- a/crates/storage/db-api/src/models/integer_list.rs +++ b/crates/storage/db-api/src/models/integer_list.rs @@ -4,7 +4,7 @@ use crate::{ table::{Compress, Decompress}, DatabaseError, }; -use reth_primitives::IntegerList; +use reth_primitives_traits::IntegerList; impl Compress for IntegerList { type Compressed = Vec; diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 7438feedea51..df6467336507 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -8,7 +8,7 @@ use reth_codecs::{main_codec, Compact}; use reth_primitives::{Address, B256, *}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; -use reth_trie_types::{StoredNibbles, StoredNibblesSubKey, *}; +use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; pub mod accounts; pub mod blocks; @@ -314,70 +314,69 @@ mod tests { // // this check is to ensure we do not inadvertently add too many fields to a struct which would // expand the flags field and break backwards compatibility + #[cfg(not(feature = "optimism"))] #[test] fn test_ensure_backwards_compatibility() { - #[cfg(not(feature = "optimism"))] - { - assert_eq!(Account::bitflag_encoded_bytes(), 2); - assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); - assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); - assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); - assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); - assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); - 
assert_eq!(Header::bitflag_encoded_bytes(), 4); - assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); - assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); - assert_eq!(Receipt::bitflag_encoded_bytes(), 1); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); - assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); - assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); - assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); - assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); - assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); - assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); - assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); - } - - #[cfg(feature = "optimism")] - { - assert_eq!(Account::bitflag_encoded_bytes(), 2); - assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); - assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); - assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); - assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); - assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(Header::bitflag_encoded_bytes(), 4); - assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); - 
assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); - assert_eq!(Receipt::bitflag_encoded_bytes(), 2); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); - assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); - assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); - assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); - assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); - assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); - assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); - assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); - } + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 1); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + 
assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + } + + #[cfg(feature = "optimism")] + #[test] + fn test_ensure_backwards_compatibility() { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 2); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + 
assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); } } diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 0e6a3720dc87..d80236defd32 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -9,6 +9,7 @@ repository.workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-primitives.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true @@ -18,10 +19,15 @@ reth-trie.workspace = true reth-etl.workspace = true reth-codecs.workspace = true reth-stages-types.workspace = true +reth-fs-util.workspace = true + +# eth +alloy-genesis.workspace = true # misc eyre.workspace = true thiserror.workspace = true +boyer-moore-magiclen.workspace = true # io serde.workspace = true @@ -30,5 +36,8 @@ serde_json.workspace = true # tracing tracing.workspace = true +[dev-dependencies] +reth-primitives-traits.workspace = true + [lints] workspace = true diff --git a/bin/reth/src/utils.rs b/crates/storage/db-common/src/db_tool/mod.rs similarity index 94% rename from bin/reth/src/utils.rs rename to crates/storage/db-common/src/db_tool/mod.rs index 6e5089d2754b..6da09900faf6 100644 --- a/bin/reth/src/utils.rs +++ b/crates/storage/db-common/src/db_tool/mod.rs @@ -1,7 +1,8 @@ -//! Common CLI utility functions. +//! 
Common db operations use boyer_moore_magiclen::BMByte; use eyre::Result; +use reth_chainspec::ChainSpec; use reth_db::{RawTable, TableRawRow}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -11,20 +12,10 @@ use reth_db_api::{ DatabaseError, }; use reth_fs_util as fs; -use reth_primitives::ChainSpec; use reth_provider::{ChainSpecProvider, ProviderFactory}; use std::{path::Path, rc::Rc, sync::Arc}; use tracing::info; -/// Exposing `open_db_read_only` function -pub mod db { - pub use reth_db::open_db_read_only; -} - -/// Re-exported from `reth_node_core`, also to prevent a breaking change. See the comment on -/// the `reth_node_core::args` re-export for more details. -pub use reth_node_core::utils::*; - /// Wrapper over DB that implements many useful DB queries. #[derive(Debug)] pub struct DbTool { diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index b2c00220a065..24c2af83b853 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -1,13 +1,14 @@ //! Reth genesis initialization utility functions. +use alloy_genesis::GenesisAccount; +use reth_chainspec::ChainSpec; use reth_codecs::Compact; use reth_config::config::EtlConfig; use reth_db::tables; use reth_db_api::{database::Database, transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; use reth_primitives::{ - Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, StaticFileSegment, - StorageEntry, B256, U256, + Account, Address, Bytecode, Receipts, StaticFileSegment, StorageEntry, B256, U256, }; use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, @@ -463,19 +464,17 @@ fn compute_state_root(provider: &DatabaseProviderRW) -> eyre:: .root_with_progress()? 
{ StateRootProgress::Progress(state, _, updates) => { - let updates_len = updates.len(); + let updated_len = updates.write_to_database(tx)?; + total_flushed_updates += updated_len; trace!(target: "reth::cli", last_account_key = %state.last_account_key, - updates_len, + updated_len, total_flushed_updates, "Flushing trie updates" ); intermediate_state = Some(*state); - updates.flush(tx)?; - - total_flushed_updates += updates_len; if total_flushed_updates % SOFT_LIMIT_COUNT_FLUSHED_UPDATES == 0 { info!(target: "reth::cli", @@ -485,15 +484,12 @@ fn compute_state_root(provider: &DatabaseProviderRW) -> eyre:: } } StateRootProgress::Complete(root, _, updates) => { - let updates_len = updates.len(); - - updates.flush(tx)?; - - total_flushed_updates += updates_len; + let updated_len = updates.write_to_database(tx)?; + total_flushed_updates += updated_len; trace!(target: "reth::cli", %root, - updates_len = updates_len, + updated_len, total_flushed_updates, "State root has been computed" ); @@ -524,6 +520,8 @@ struct GenesisAccountWithAddress { #[cfg(test)] mod tests { use super::*; + use alloy_genesis::Genesis; + use reth_chainspec::{Chain, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; use reth_db_api::{ cursor::DbCursorRO, @@ -531,10 +529,8 @@ mod tests { table::{Table, TableRow}, transaction::DbTx, }; - use reth_primitives::{ - Chain, Genesis, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, MAINNET_GENESIS_HASH, - SEPOLIA, SEPOLIA_GENESIS_HASH, - }; + use reth_primitives::{HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; + use reth_primitives_traits::IntegerList; use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; fn collect_table_entries( @@ -557,21 +553,21 @@ mod tests { } #[test] - fn success_init_genesis_goerli() { + fn success_init_genesis_sepolia() { let genesis_hash = - init_genesis(create_test_provider_factory_with_chain_spec(GOERLI.clone())).unwrap(); + 
init_genesis(create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap(); // actual, expected - assert_eq!(genesis_hash, GOERLI_GENESIS_HASH); + assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH); } #[test] - fn success_init_genesis_sepolia() { + fn success_init_genesis_holesky() { let genesis_hash = - init_genesis(create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap(); + init_genesis(create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap(); // actual, expected - assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH); + assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH); } #[test] @@ -619,7 +615,7 @@ mod tests { ]), ..Default::default() }, - hardforks: BTreeMap::default(), + hardforks: Default::default(), genesis_hash: None, paris_block_and_final_difficulty: None, deposit_contract: None, diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs index abcbc62762a4..173e53143408 100644 --- a/crates/storage/db-common/src/lib.rs +++ b/crates/storage/db-common/src/lib.rs @@ -9,3 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod init; + +mod db_tool; +pub use db_tool::*; diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index ca27a9a40739..117ec5ccc7b6 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -15,17 +15,21 @@ workspace = true # reth reth-db-api.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-storage-errors.workspace = true -reth-libmdbx = { workspace = true, optional = true, features = [ - "return-borrowed", - "read-tx-timeouts", -] } reth-nippy-jar.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-tracing.workspace = true -reth-trie-types.workspace = true +reth-trie-common.workspace = true + +# mdbx +reth-libmdbx = { workspace = true, optional = true, features = [ + "return-borrowed", + "read-tx-timeouts", +] } +eyre 
= { workspace = true, optional = true } # codecs serde = { workspace = true, default-features = false } @@ -40,10 +44,9 @@ page_size = "0.6.0" thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true -eyre.workspace = true paste.workspace = true rustc-hash.workspace = true -sysinfo = "0.30" +sysinfo = { version = "0.30", default-features = false } # arbitrary utils strum = { workspace = true, features = ["derive"] } @@ -63,7 +66,7 @@ pprof = { workspace = true, features = [ "criterion", ] } criterion.workspace = true -iai-callgrind = "0.10.2" +iai-callgrind.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true @@ -74,11 +77,12 @@ assert_matches.workspace = true [features] default = ["mdbx"] -test-utils = ["tempfile", "arbitrary"] -mdbx = ["reth-libmdbx"] +mdbx = ["dep:reth-libmdbx", "dep:eyre"] +test-utils = ["dep:tempfile", "arbitrary"] bench = [] arbitrary = ["reth-primitives/arbitrary", "reth-db-api/arbitrary"] optimism = [] +disable-lock = [] [[bench]] name = "hash_keys" diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index d37146fd1e28..1807e6f4a6ec 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -130,7 +130,6 @@ where /// Generates two batches. The first is to be inserted into the database before running the /// benchmark. The second is to be benchmarked with. 
-#[allow(clippy::type_complexity)] fn generate_batches(size: usize) -> (Vec>, Vec>) where T: Table, diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 956374072bca..e2afbe0c16c7 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -36,7 +36,7 @@ pub struct Cursor { } impl Cursor { - pub(crate) fn new_with_metrics( + pub(crate) const fn new_with_metrics( inner: reth_libmdbx::Cursor, metrics: Option>, ) -> Self { diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 614826fcd730..cd54ec7b476a 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -41,7 +41,6 @@ const DEFAULT_MAX_READERS: u64 = 32_000; /// Space that a read-only transaction can occupy until the warning is emitted. /// See [`reth_libmdbx::EnvironmentBuilder::set_handle_slow_readers`] for more information. -#[cfg(not(windows))] const MAX_SAFE_READER_SPACE: usize = 10 * GIGABYTE; /// Environment used when opening a MDBX environment. RO/RW. @@ -483,7 +482,8 @@ mod tests { table::{Encode, Table}, }; use reth_libmdbx::Error; - use reth_primitives::{Account, Address, Header, IntegerList, StorageEntry, B256, U256}; + use reth_primitives::{Account, Address, Header, StorageEntry, B256, U256}; + use reth_primitives_traits::IntegerList; use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; use tempfile::TempDir; diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index 3b8099b7e4e5..e0da20348a53 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -1,5 +1,7 @@ //! Storage lock utils. 
+#![cfg_attr(feature = "disable-lock", allow(dead_code))] + use reth_storage_errors::lockfile::StorageLockError; use reth_tracing::tracing::error; use std::{ @@ -7,7 +9,7 @@ use std::{ process, sync::Arc, }; -use sysinfo::System; +use sysinfo::{ProcessRefreshKind, RefreshKind, System}; /// File lock name. const LOCKFILE_NAME: &str = "lock"; @@ -28,15 +30,31 @@ impl StorageLock { /// Note: In-process exclusivity is not on scope. If called from the same process (or another /// with the same PID), it will succeed. pub fn try_acquire(path: &Path) -> Result { - let path = path.join(LOCKFILE_NAME); + let file_path = path.join(LOCKFILE_NAME); - if let Some(pid) = parse_lock_file_pid(&path)? { - if pid != (process::id() as usize) && System::new_all().process(pid.into()).is_some() { - return Err(StorageLockError::Taken(pid)); - } + #[cfg(feature = "disable-lock")] + { + // Too expensive for ef-tests to write/read lock to/from disk. + Ok(Self(Arc::new(StorageLockInner { file_path }))) } - Ok(Self(Arc::new(StorageLockInner::new(path)?))) + #[cfg(not(feature = "disable-lock"))] + { + if let Some(process_lock) = ProcessUID::parse(&file_path)? { + if process_lock.pid != (process::id() as usize) && process_lock.is_active() { + error!( + target: "reth::db::lockfile", + path = ?file_path, + pid = process_lock.pid, + start_time = process_lock.start_time, + "Storage lock already taken." + ); + return Err(StorageLockError::Taken(process_lock.pid)) + } + } + + Ok(Self(Arc::new(StorageLockInner::new(file_path)?))) + } } } @@ -66,19 +84,61 @@ impl StorageLockInner { reth_fs_util::create_dir_all(parent)?; } - reth_fs_util::write(&file_path, format!("{}", process::id()))?; + // Write this process unique identifier (pid & start_time) to file + ProcessUID::own().write(&file_path)?; Ok(Self { file_path }) } } -/// Parses the PID from the lock file if it exists. 
-fn parse_lock_file_pid(path: &Path) -> Result, StorageLockError> { - if path.exists() { - let contents = reth_fs_util::read_to_string(path)?; - return Ok(contents.trim().parse().ok()); +#[derive(Debug)] +struct ProcessUID { + /// OS process identifier + pid: usize, + /// Process start time + start_time: u64, +} + +impl ProcessUID { + /// Creates [`Self`] for the provided PID. + fn new(pid: usize) -> Option { + System::new_with_specifics(RefreshKind::new().with_processes(ProcessRefreshKind::new())) + .process(pid.into()) + .map(|process| Self { pid, start_time: process.start_time() }) + } + + /// Creates [`Self`] from own process. + fn own() -> Self { + Self::new(process::id() as usize).expect("own process") + } + + /// Parses [`Self`] from a file. + fn parse(path: &Path) -> Result, StorageLockError> { + if path.exists() { + if let Ok(contents) = reth_fs_util::read_to_string(path) { + let mut lines = contents.lines(); + if let (Some(Ok(pid)), Some(Ok(start_time))) = ( + lines.next().map(str::trim).map(str::parse), + lines.next().map(str::trim).map(str::parse), + ) { + return Ok(Some(Self { pid, start_time })); + } + } + } + Ok(None) + } + + /// Whether a process with this `pid` and `start_time` exists. + fn is_active(&self) -> bool { + System::new_with_specifics(RefreshKind::new().with_processes(ProcessRefreshKind::new())) + .process(self.pid.into()) + .is_some_and(|p| p.start_time() == self.start_time) + } + + /// Writes `pid` and `start_time` to a file. + fn write(&self, path: &Path) -> Result<(), StorageLockError> { + Ok(reth_fs_util::write(path, format!("{}\n{}", self.pid, self.start_time))?) 
} - Ok(None) } #[cfg(test)] @@ -101,12 +161,19 @@ mod tests { while system.process(fake_pid.into()).is_some() { fake_pid += 1; } - reth_fs_util::write(&lock_file, format!("{}", fake_pid)).unwrap(); - assert_eq!(Ok(lock), StorageLock::try_acquire(temp_dir.path())); + ProcessUID { pid: fake_pid, start_time: u64::MAX }.write(&lock_file).unwrap(); + assert_eq!(Ok(lock.clone()), StorageLock::try_acquire(temp_dir.path())); + + let mut pid_1 = ProcessUID::new(1).unwrap(); - // A lock of a different but existing PID cannot be acquired. - reth_fs_util::write(&lock_file, "1").unwrap(); + // If a parsed `ProcessUID` exists, the lock can NOT be acquired. + pid_1.write(&lock_file).unwrap(); assert_eq!(Err(StorageLockError::Taken(1)), StorageLock::try_acquire(temp_dir.path())); + + // A lock of a different but existing PID can be acquired ONLY IF the start_time differs. + pid_1.start_time += 1; + pid_1.write(&lock_file).unwrap(); + assert_eq!(Ok(lock), StorageLock::try_acquire(temp_dir.path())); } #[test] @@ -116,8 +183,8 @@ mod tests { let lock = StorageLock::try_acquire(temp_dir.path()).unwrap(); + assert!(lock_file.exists()); drop(lock); - assert!(!lock_file.exists()); } } diff --git a/crates/storage/db/src/mdbx.rs b/crates/storage/db/src/mdbx.rs index 328b9caabfdf..d6947e10bd2b 100644 --- a/crates/storage/db/src/mdbx.rs +++ b/crates/storage/db/src/mdbx.rs @@ -1,12 +1,12 @@ //! Bindings for [MDBX](https://libmdbx.dqdkfa.ru/). -pub use crate::implementation::mdbx::*; -pub use reth_libmdbx::*; - use crate::is_database_empty; use eyre::Context; use std::path::Path; +pub use crate::implementation::mdbx::*; +pub use reth_libmdbx::*; + /// Creates a new database at the specified path if it doesn't exist. Does NOT create tables. Check /// [`init_db`]. pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Result { @@ -31,21 +31,17 @@ pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Resu /// Opens up an existing database or creates a new one at the specified path. 
Creates tables if /// necessary. Read/Write mode. pub fn init_db>(path: P, args: DatabaseArguments) -> eyre::Result { - { - let client_version = args.client_version().clone(); - let db = create_db(path, args)?; - db.create_tables()?; - db.record_client_version(client_version)?; - Ok(db) - } + let client_version = args.client_version().clone(); + let db = create_db(path, args)?; + db.create_tables()?; + db.record_client_version(client_version)?; + Ok(db) } /// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing. pub fn open_db_read_only(path: &Path, args: DatabaseArguments) -> eyre::Result { - { - DatabaseEnv::open(path, DatabaseEnvKind::RO, args) - .with_context(|| format!("Could not open database at path: {}", path.display())) - } + DatabaseEnv::open(path, DatabaseEnvKind::RO, args) + .with_context(|| format!("Could not open database at path: {}", path.display())) } /// Opens up an existing database. Read/Write mode with `WriteMap` enabled. It doesn't create it or diff --git a/crates/storage/db/src/metrics.rs b/crates/storage/db/src/metrics.rs index 0d0b68722e64..fecd691ee5d7 100644 --- a/crates/storage/db/src/metrics.rs +++ b/crates/storage/db/src/metrics.rs @@ -1,12 +1,8 @@ use crate::Tables; use metrics::{Gauge, Histogram}; use reth_metrics::{metrics::Counter, Metrics}; -use rustc_hash::{FxHashMap, FxHasher}; -use std::{ - collections::HashMap, - hash::BuildHasherDefault, - time::{Duration, Instant}, -}; +use rustc_hash::FxHashMap; +use std::time::{Duration, Instant}; use strum::{EnumCount, EnumIter, IntoEnumIterator}; const LARGE_VALUE_THRESHOLD_BYTES: usize = 4096; @@ -45,7 +41,7 @@ impl DatabaseEnvMetrics { fn generate_operation_handles() -> FxHashMap<(&'static str, Operation), OperationMetrics> { let mut operations = FxHashMap::with_capacity_and_hasher( Tables::COUNT * Operation::COUNT, - BuildHasherDefault::::default(), + Default::default(), ); for table in Tables::ALL { for operation in Operation::iter() { @@ 
-81,9 +77,9 @@ impl DatabaseEnvMetrics { /// Used for tracking various stats for finished transactions (e.g. commit duration). fn generate_transaction_outcome_handles( ) -> FxHashMap<(TransactionMode, TransactionOutcome), TransactionOutcomeMetrics> { - let mut transaction_outcomes = HashMap::with_capacity_and_hasher( + let mut transaction_outcomes = FxHashMap::with_capacity_and_hasher( TransactionMode::COUNT * TransactionOutcome::COUNT, - BuildHasherDefault::::default(), + Default::default(), ); for mode in TransactionMode::iter() { for outcome in TransactionOutcome::iter() { diff --git a/crates/storage/db/src/static_file/cursor.rs b/crates/storage/db/src/static_file/cursor.rs index c37cb59e92ff..e46213333038 100644 --- a/crates/storage/db/src/static_file/cursor.rs +++ b/crates/storage/db/src/static_file/cursor.rs @@ -10,6 +10,9 @@ use std::sync::Arc; #[derive(Debug, Deref, DerefMut)] pub struct StaticFileCursor<'a>(NippyJarCursor<'a, SegmentHeader>); +/// Type alias for column results with optional values. +type ColumnResult = ProviderResult>; + impl<'a> StaticFileCursor<'a> { /// Returns a new [`StaticFileCursor`]. pub fn new(jar: &'a NippyJar, reader: Arc) -> ProviderResult { @@ -56,7 +59,7 @@ impl<'a> StaticFileCursor<'a> { pub fn get_one( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> ProviderResult> { + ) -> ColumnResult { let row = self.get(key_or_num, M::MASK)?; match row { @@ -69,7 +72,7 @@ impl<'a> StaticFileCursor<'a> { pub fn get_two( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> ProviderResult> { + ) -> ColumnResult<(M::FIRST, M::SECOND)> { let row = self.get(key_or_num, M::MASK)?; match row { @@ -79,11 +82,10 @@ impl<'a> StaticFileCursor<'a> { } /// Gets three column values from a row. 
- #[allow(clippy::type_complexity)] pub fn get_three( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> ProviderResult> { + ) -> ColumnResult<(M::FIRST, M::SECOND, M::THIRD)> { let row = self.get(key_or_num, M::MASK)?; match row { diff --git a/crates/storage/db/src/static_file/generation.rs b/crates/storage/db/src/static_file/generation.rs deleted file mode 100644 index 9c2a64a23b18..000000000000 --- a/crates/storage/db/src/static_file/generation.rs +++ /dev/null @@ -1,115 +0,0 @@ -use crate::{RawKey, RawTable}; -use reth_db_api::{ - cursor::DbCursorRO, - table::{Key, Table}, - transaction::DbTx, -}; - -use reth_nippy_jar::{ColumnResult, NippyJar, NippyJarHeader, PHFKey}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_tracing::tracing::*; -use std::{error::Error as StdError, ops::RangeInclusive}; - -/// Macro that generates static file creation functions that take an arbitrary number of [`Table`] -/// and creates a [`NippyJar`] file out of their [`Table::Value`]. Each list of [`Table::Value`] -/// from a table is a column of values. -/// -/// Has membership filter set and compression dictionary support. -macro_rules! generate_static_file_func { - ($(($($tbl:ident),+)),+ $(,)? ) => { - $( - paste::item! { - /// Creates a static file from specified tables. Each table's `Value` iterator represents a column. - /// - /// **Ensure the range contains the same number of rows.** - /// - /// * `tx`: Database transaction. - /// * `range`: Data range for columns in tables. - /// * `additional`: Additional columns which can't be straight straightforwardly walked on. - /// * `keys`: IntoIterator of keys (eg. `TxHash` or `BlockHash`) with length equal to `row_count` and ordered by future column insertion from `range`. - /// * `dict_compression_set`: Sets of column data for compression dictionaries. Max size is 2GB. Row count is independent. - /// * `row_count`: Total rows to add to `NippyJar`. Must match row count in `range`. 
- /// * `nippy_jar`: Static File object responsible for file generation. - #[allow(non_snake_case)] - pub fn []< - $($tbl: Table,)+ - K, - H: NippyJarHeader - > - ( - tx: &impl DbTx, - range: RangeInclusive, - additional: Option, Box>>>>>, - dict_compression_set: Option>>>, - keys: Option>>, - row_count: usize, - mut nippy_jar: NippyJar - ) -> ProviderResult<()> - where K: Key + Copy - { - let additional = additional.unwrap_or_default(); - debug!(target: "reth::static_file", ?range, "Creating static file {:?} and {} more columns.", vec![$($tbl::NAME,)+], additional.len()); - - let range: RangeInclusive> = RawKey::new(*range.start())..=RawKey::new(*range.end()); - - // Create PHF and Filter if required - if let Some(keys) = keys { - debug!(target: "reth::static_file", "Calculating Filter, PHF and offset index list"); - match nippy_jar.prepare_index(keys, row_count) { - Ok(_) => { - debug!(target: "reth::static_file", "Filter, PHF and offset index list calculated."); - }, - Err(e) => { - return Err(ProviderError::NippyJar(e.to_string())); - } - } - } - - // Create compression dictionaries if required - if let Some(data_sets) = dict_compression_set { - debug!(target: "reth::static_file", "Creating compression dictionaries."); - match nippy_jar.prepare_compression(data_sets){ - Ok(_) => { - debug!(target: "reth::static_file", "Compression dictionaries created."); - }, - Err(e) => { - return Err(ProviderError::NippyJar(e.to_string())); - } - } - - } - - // Creates the cursors for the columns - $( - let mut [< $tbl _cursor>] = tx.cursor_read::>()?; - let [< $tbl _iter>] = [< $tbl _cursor>] - .walk_range(range.clone())? 
- .into_iter() - .map(|row| - row - .map(|(_key, val)| val.into_value()) - .map_err(|e| Box::new(e) as Box) - ); - - )+ - - // Create the static file from the data - let col_iterators: Vec,_>>>> = vec![ - $(Box::new([< $tbl _iter>]),)+ - ]; - - - debug!(target: "reth::static_file", jar=?nippy_jar, "Generating static file."); - - let nippy_jar = nippy_jar.freeze(col_iterators.into_iter().chain(additional).collect(), row_count as u64).map_err(|e| ProviderError::NippyJar(e.to_string())); - - debug!(target: "reth::static_file", jar=?nippy_jar, "Static file generated."); - - Ok(()) - } - } - )+ - }; -} - -generate_static_file_func!((T1), (T1, T2), (T1, T2, T3), (T1, T2, T3, T4), (T1, T2, T3, T4, T5),); diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index daa6f8a816bd..f27a574f640e 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -1,13 +1,10 @@ //! reth's static file database table import and access -mod generation; use std::{ collections::{hash_map::Entry, HashMap}, path::Path, }; -pub use generation::*; - mod cursor; pub use cursor::StaticFileCursor; diff --git a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs index 1498dcc907bb..c244c056548c 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs @@ -1,6 +1,6 @@ //! Curates the input coming from the fuzzer for certain types. 
-use reth_primitives::IntegerList; +use reth_primitives_traits::IntegerList; use serde::{Deserialize, Serialize}; /// Makes sure that the list provided by the fuzzer is not empty and pre-sorted diff --git a/crates/storage/db/src/tables/codecs/fuzz/mod.rs b/crates/storage/db/src/tables/codecs/fuzz/mod.rs index 826f44d43f0b..1d038bf7e65d 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/mod.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/mod.rs @@ -19,6 +19,9 @@ macro_rules! impl_fuzzer_with_input { #[allow(unused_imports)] use reth_primitives::*; + #[allow(unused_imports)] + use reth_primitives_traits::*; + #[allow(unused_imports)] use super::inputs::*; diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 5d3c685a526f..af350b74ae82 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -11,9 +11,6 @@ //! //! TODO(onbjerg): Find appropriate format for this... -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] - pub mod codecs; mod raw; @@ -32,12 +29,13 @@ use reth_db_api::{ table::{Decode, DupSort, Encode, Table}, }; use reth_primitives::{ - Account, Address, BlockHash, BlockNumber, Bytecode, Header, IntegerList, Receipt, Requests, - StorageEntry, TransactionSignedNoHash, TxHash, TxNumber, B256, + Account, Address, BlockHash, BlockNumber, Bytecode, Header, Receipt, Requests, StorageEntry, + TransactionSignedNoHash, TxHash, TxNumber, B256, }; +use reth_primitives_traits::IntegerList; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; -use reth_trie_types::{StorageTrieEntry, StoredBranchNode, StoredNibbles, StoredNibblesSubKey}; +use reth_trie_common::{StorageTrieEntry, StoredBranchNode, StoredNibbles, StoredNibblesSubKey}; use serde::{Deserialize, Serialize}; use std::fmt; diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 
1b501b4c5642..1e8fa56b3603 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -53,7 +53,7 @@ impl RawKey { /// Creates a raw key from an existing `Vec`. Useful when we already have the encoded /// key. - pub fn from_vec(vec: Vec) -> Self { + pub const fn from_vec(vec: Vec) -> Self { Self { key: vec, _phantom: std::marker::PhantomData } } @@ -118,7 +118,7 @@ impl RawValue { /// Creates a raw value from an existing `Vec`. Useful when we already have the encoded /// value. - pub fn from_vec(vec: Vec) -> Self { + pub const fn from_vec(vec: Vec) -> Self { Self { value: vec, _phantom: std::marker::PhantomData } } diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index 5fa806345269..d8e699f8df40 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -14,6 +14,8 @@ workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true -thiserror.workspace = true +thiserror-no-std = { workspace = true, default-features = false } - +[features] +default = ["std"] +std = ["thiserror-no-std/std"] \ No newline at end of file diff --git a/crates/storage/errors/src/db.rs b/crates/storage/errors/src/db.rs index b731e4d240dc..8b4896d23236 100644 --- a/crates/storage/errors/src/db.rs +++ b/crates/storage/errors/src/db.rs @@ -1,8 +1,19 @@ -use std::{fmt::Display, str::FromStr}; -use thiserror::Error; +#[cfg(feature = "std")] +use std::{fmt::Display, str::FromStr, string::String}; + +#[cfg(not(feature = "std"))] +use alloc::{ + boxed::Box, + format, + string::{String, ToString}, + vec::Vec, +}; + +#[cfg(not(feature = "std"))] +use core::{fmt::Display, str::FromStr}; /// Database error type. -#[derive(Clone, Debug, PartialEq, Eq, Error)] +#[derive(Clone, Debug, PartialEq, Eq, thiserror_no_std::Error)] pub enum DatabaseError { /// Failed to open the database. 
#[error("failed to open the database: {0}")] @@ -43,7 +54,7 @@ pub enum DatabaseError { } /// Common error struct to propagate implementation-specific error information. -#[derive(Debug, Error, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, thiserror_no_std::Error)] #[error("{message} ({code})")] pub struct DatabaseErrorInfo { /// Human-readable error message. @@ -70,7 +81,7 @@ impl From for DatabaseError { } /// Database write error. -#[derive(Clone, Debug, PartialEq, Eq, Error)] +#[derive(Clone, Debug, PartialEq, Eq, thiserror_no_std::Error)] #[error( "write operation {operation:?} failed for key \"{key}\" in table {table_name:?}: {info}", key = reth_primitives::hex::encode(key), @@ -179,7 +190,7 @@ impl FromStr for LogLevel { "debug" => Ok(Self::Debug), "trace" => Ok(Self::Trace), "extra" => Ok(Self::Extra), - _ => Err(format!("Invalid log level: {}", s)), + _ => Err(format!("Invalid log level: {s}")), } } } diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs index 8247c635270e..dc8d24a160d2 100644 --- a/crates/storage/errors/src/lib.rs +++ b/crates/storage/errors/src/lib.rs @@ -7,6 +7,10 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc; /// Database error pub mod db; diff --git a/crates/storage/errors/src/lockfile.rs b/crates/storage/errors/src/lockfile.rs index 674485457ab8..db27cb6e2e45 100644 --- a/crates/storage/errors/src/lockfile.rs +++ b/crates/storage/errors/src/lockfile.rs @@ -1,8 +1,10 @@ use reth_fs_util::FsPathError; -use thiserror::Error; -#[derive(Error, Debug, Clone, PartialEq, Eq)] +#[cfg(not(feature = "std"))] +use alloc::string::{String, ToString}; + /// Storage lock error. 
+#[derive(Debug, Clone, PartialEq, Eq, thiserror_no_std::Error)] pub enum StorageLockError { /// Write lock taken #[error("storage directory is currently in use as read-write by another process: PID {0}")] diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 7472d500cab2..db59d671fef7 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -2,14 +2,21 @@ use reth_primitives::{ Address, BlockHash, BlockHashOrNumber, BlockNumber, GotExpected, StaticFileSegment, TxHashOrNumber, TxNumber, B256, U256, }; + +#[cfg(feature = "std")] use std::path::PathBuf; -use thiserror::Error; + +#[cfg(not(feature = "std"))] +use alloc::{ + boxed::Box, + string::{String, ToString}, +}; /// Provider result type. pub type ProviderResult = Result; /// Bundled errors variants thrown by various providers. -#[derive(Clone, Debug, Error, PartialEq, Eq)] +#[derive(Clone, Debug, thiserror_no_std::Error, PartialEq, Eq)] pub enum ProviderError { /// Database error. #[error(transparent)] @@ -89,9 +96,6 @@ pub enum ProviderError { /// Thrown when we were unable to find a state for a block hash. #[error("no state found for block {0}")] StateForHashNotFound(B256), - /// Unable to compute state root on top of historical block. - #[error("unable to compute state root on top of historical block")] - StateRootNotAvailableForHistoricalBlock, /// Unable to find the block number for a given transaction index. #[error("unable to find the block number for a given transaction index")] BlockNumberForTransactionIndexNotFound, @@ -108,6 +112,7 @@ pub enum ProviderError { #[error("this provider does not support this request")] UnsupportedProvider, /// Static File is not found at specified path. + #[cfg(feature = "std")] #[error("not able to find {0} static file at {1}")] MissingStaticFilePath(StaticFileSegment, PathBuf), /// Static File is not found for requested block. 
@@ -143,7 +148,7 @@ impl From for ProviderError { } /// A root mismatch error at a given block height. -#[derive(Clone, Debug, Error, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, thiserror_no_std::Error)] #[error("root mismatch at #{block_number} ({block_hash}): {root}")] pub struct RootMismatch { /// The target block root diff. @@ -155,7 +160,7 @@ pub struct RootMismatch { } /// Consistent database view error. -#[derive(Clone, Debug, Error, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, thiserror_no_std::Error)] pub enum ConsistentViewError { /// Error thrown on attempt to initialize provider while node is still syncing. #[error("node is syncing. best block: {best_block:?}")] diff --git a/crates/storage/libmdbx-rs/Cargo.lock b/crates/storage/libmdbx-rs/Cargo.lock deleted file mode 100644 index 18c3cf63dc98..000000000000 --- a/crates/storage/libmdbx-rs/Cargo.lock +++ /dev/null @@ -1,1012 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "bindgen" -version = "0.60.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" -dependencies = [ - "bitflags", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", -] - -[[package]] -name = "bumpalo" -version = "3.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" - -[[package]] -name = "cast" -version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" - -[[package]] -name = "cc" -version = "1.0.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "clang-sys" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" -dependencies = [ - "glob", - "libc", - "libloading", -] - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "bitflags", - "textwrap", - "unicode-width", -] - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "criterion" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" -dependencies = [ - "atty", - "cast", - "clap", - "criterion-plot", - "csv", - "itertools", - "lazy_static", - "num-traits", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_cbor", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - -[[package]] -name = "criterion-plot" -version = "0.4.5" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" -dependencies = [ - "cast", - "itertools", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" -dependencies = [ - "cfg-if", - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "csv" -version = "1.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" -dependencies = [ - "bstr", - "csv-core", - "itoa 0.4.8", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" -dependencies = [ - "memchr", -] - -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = 
[ - "convert_case", - "proc-macro2", - "quote", - "rustc_version", - "syn", -] - -[[package]] -name = "either" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" - -[[package]] -name = "fastrand" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" -dependencies = [ - "instant", -] - -[[package]] -name = "getrandom" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" - -[[package]] -name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "indexmap" -version = "1.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - -[[package]] -name = "itoa" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" - -[[package]] -name = "js-sys" -version = "0.3.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "libc" -version = "0.2.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55edcf6c0bb319052dea84732cf99db461780fd5e8d3eb46ab6ff312ab31f197" - -[[package]] -name = "libloading" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" -dependencies = [ - "cfg-if", - "winapi", -] - -[[package]] -name = "lifetimed-bytes" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c970c8ea4c7b023a41cfa4af4c785a16694604c2f2a3b0d1f20a9bcb73fa550" -dependencies = [ - "bytes", -] - -[[package]] -name = "lock_api" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "mdbx-sys" -version = "0.11.8-0" -dependencies = [ - "bindgen", - "cc", - "libc", -] - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "nom" -version = "7.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "num-traits" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" 
-dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "once_cell" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" - -[[package]] -name = "oorandom" -version = "11.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" - -[[package]] -name = "parking_lot" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-sys", -] - -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - -[[package]] -name = "plotters" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" -dependencies = [ - "num-traits", - "plotters-backend", - "plotters-svg", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "plotters-backend" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" - -[[package]] -name = "plotters-svg" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" -dependencies = [ - "plotters-backend", -] - -[[package]] -name = 
"ppv-lite86" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" - -[[package]] -name = "proc-macro2" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_xorshift" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" -dependencies = [ - "rand_core", -] - -[[package]] -name = "rayon" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" -dependencies = [ - "autocfg", - "crossbeam-deque", - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.9.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" -dependencies = [ - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-utils", - "num_cpus", -] - -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" -dependencies = [ - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" - -[[package]] -name = "regex-syntax" -version = "0.6.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "reth-libmdbx" -version = "0.1.6" -dependencies = [ - "bitflags", - "byteorder", - "criterion", - "derive_more", - "indexmap", - "libc", - "lifetimed-bytes", - "mdbx-sys", - "parking_lot", - "rand", - "rand_xorshift", - "tempfile", - "thiserror", -] - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "ryu" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "semver" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" - -[[package]] -name = "serde" -version = "1.0.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" - -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" -dependencies = [ - "itoa 1.0.4", - "ryu", - "serde", -] - -[[package]] -name = "shlex" -version = "1.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" - -[[package]] -name = "smallvec" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" - -[[package]] -name = "syn" -version = "1.0.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "unicode-ident" -version = "1.0.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "walkdir" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" -dependencies = [ - "same-file", - "winapi", - "winapi-util", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - 
"wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" - -[[package]] -name = "web-sys" -version = "0.3.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 68576e0d066e..8056b68557b8 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -1,32 +1,28 @@ [package] name = "reth-libmdbx" +description = "Idiomatic and safe MDBX wrapper" version.workspace = true edition.workspace = true rust-version.workspace = true license = "Apache-2.0" -description = "Idiomatic and safe MDBX wrapper with good licence" homepage.workspace = true repository.workspace = true [lints] workspace = true -[lib] -name = "reth_libmdbx" - [dependencies] +reth-mdbx-sys.workspace = true + bitflags.workspace = true 
byteorder = "1" derive_more.workspace = true indexmap = "2" -libc = "0.2" parking_lot.workspace = true thiserror.workspace = true dashmap = { workspace = true, features = ["inline"], optional = true } tracing.workspace = true -reth-mdbx-sys.workspace = true - [features] default = [] return-borrowed = [] diff --git a/crates/storage/libmdbx-rs/LICENSE b/crates/storage/libmdbx-rs/LICENSE deleted file mode 100644 index fec6b4387638..000000000000 --- a/crates/storage/libmdbx-rs/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright 2014 Dan Burkert - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/crates/storage/libmdbx-rs/benches/transaction.rs b/crates/storage/libmdbx-rs/benches/transaction.rs index 8cc84b01f3c4..33d25cdaa68a 100644 --- a/crates/storage/libmdbx-rs/benches/transaction.rs +++ b/crates/storage/libmdbx-rs/benches/transaction.rs @@ -2,7 +2,6 @@ mod utils; use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use libc::size_t; use rand::{prelude::SliceRandom, SeedableRng}; use rand_xorshift::XorShiftRng; use reth_libmdbx::{ffi::*, ObjectLength, WriteFlags}; @@ -46,10 +45,10 @@ fn bench_get_rand_raw(c: &mut Criterion) { c.bench_function("bench_get_rand_raw", |b| { b.iter(|| unsafe { txn.txn_execute(|txn| { - let mut i: size_t = 0; + let mut i = 0; for key in &keys { - key_val.iov_len = key.len() as size_t; - key_val.iov_base = key.as_bytes().as_ptr() as *mut _; + key_val.iov_len = key.len(); + key_val.iov_base = key.as_bytes().as_ptr().cast_mut().cast(); mdbx_get(txn, dbi, &key_val, &mut data_val); @@ -102,12 +101,12 @@ fn bench_put_rand_raw(c: &mut Criterion) { env.with_raw_env_ptr(|env| { mdbx_txn_begin_ex(env, ptr::null_mut(), 0, &mut txn, ptr::null_mut()); - let mut i: ::libc::c_int = 0; + let mut i = 0; for (key, data) in &items { - key_val.iov_len = key.len() as size_t; - key_val.iov_base = key.as_bytes().as_ptr() as *mut _; - data_val.iov_len = data.len() as size_t; - data_val.iov_base = data.as_bytes().as_ptr() as *mut _; + key_val.iov_len = 
key.len(); + key_val.iov_base = key.as_bytes().as_ptr().cast_mut().cast(); + data_val.iov_len = data.len(); + data_val.iov_base = data.as_bytes().as_ptr().cast_mut().cast(); i += mdbx_put(txn, dbi, &key_val, &mut data_val, 0); } diff --git a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml index fbdad4c51072..8cd56d1f2791 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml +++ b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml @@ -1,19 +1,13 @@ [package] name = "reth-mdbx-sys" +description = "Raw bindings for libmdbx" version.workspace = true edition.workspace = true rust-version.workspace = true license = "Apache-2.0" -description = "Rust bindings for libmdbx with good licence." homepage.workspace = true repository.workspace = true -[lib] -name = "reth_mdbx_sys" - -[dependencies] -libc = "0.2" - [build-dependencies] cc = "1.0" bindgen = { version = "0.69", default-features = false, features = ["runtime"] } diff --git a/crates/storage/libmdbx-rs/mdbx-sys/build.rs b/crates/storage/libmdbx-rs/mdbx-sys/build.rs index 5f82d02b4c54..c265d02e2336 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/build.rs +++ b/crates/storage/libmdbx-rs/mdbx-sys/build.rs @@ -1,110 +1,112 @@ -use bindgen::{ - callbacks::{IntKind, ParseCallbacks}, - Formatter, +use std::{ + env, + path::{Path, PathBuf}, }; -use std::{env, path::PathBuf}; - -#[derive(Debug)] -struct Callbacks; - -impl ParseCallbacks for Callbacks { - fn int_macro(&self, name: &str, _value: i64) -> Option { - match name { - "MDBX_SUCCESS" | - "MDBX_KEYEXIST" | - "MDBX_NOTFOUND" | - "MDBX_PAGE_NOTFOUND" | - "MDBX_CORRUPTED" | - "MDBX_PANIC" | - "MDBX_VERSION_MISMATCH" | - "MDBX_INVALID" | - "MDBX_MAP_FULL" | - "MDBX_DBS_FULL" | - "MDBX_READERS_FULL" | - "MDBX_TLS_FULL" | - "MDBX_TXN_FULL" | - "MDBX_CURSOR_FULL" | - "MDBX_PAGE_FULL" | - "MDBX_MAP_RESIZED" | - "MDBX_INCOMPATIBLE" | - "MDBX_BAD_RSLOT" | - "MDBX_BAD_TXN" | - "MDBX_BAD_VALSIZE" | - "MDBX_BAD_DBI" | - 
"MDBX_LOG_DONTCHANGE" | - "MDBX_DBG_DONTCHANGE" | - "MDBX_RESULT_TRUE" | - "MDBX_UNABLE_EXTEND_MAPSIZE" | - "MDBX_PROBLEM" | - "MDBX_LAST_LMDB_ERRCODE" | - "MDBX_BUSY" | - "MDBX_EMULTIVAL" | - "MDBX_EBADSIGN" | - "MDBX_WANNA_RECOVERY" | - "MDBX_EKEYMISMATCH" | - "MDBX_TOO_LARGE" | - "MDBX_THREAD_MISMATCH" | - "MDBX_TXN_OVERLAPPING" | - "MDBX_LAST_ERRCODE" => Some(IntKind::Int), - _ => Some(IntKind::UInt), - } - } -} fn main() { - let mut mdbx = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap()); - mdbx.push("libmdbx"); + let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); + let mdbx = manifest_dir.join("libmdbx"); println!("cargo:rerun-if-changed={}", mdbx.display()); - let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); - - let bindings = bindgen::Builder::default() - .header(mdbx.join("mdbx.h").to_string_lossy()) - .allowlist_var("^(MDBX|mdbx)_.*") - .allowlist_type("^(MDBX|mdbx)_.*") - .allowlist_function("^(MDBX|mdbx)_.*") - .size_t_is_usize(true) - .ctypes_prefix("::libc") - .parse_callbacks(Box::new(Callbacks)) - .layout_tests(false) - .prepend_enum_name(false) - .generate_comments(false) - .disable_header_comment() - .formatter(Formatter::Rustfmt) - .generate() - .expect("Unable to generate bindings"); - - bindings.write_to_file(out_path.join("bindings.rs")).expect("Couldn't write bindings!"); - - let mut mdbx = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap()); - mdbx.push("libmdbx"); + let bindings = PathBuf::from(std::env::var("OUT_DIR").unwrap()).join("bindings.rs"); + generate_bindings(&mdbx, &bindings); - let mut cc_builder = cc::Build::new(); - cc_builder.flag_if_supported("-Wno-unused-parameter").flag_if_supported("-Wuninitialized"); + let mut cc = cc::Build::new(); + cc.flag_if_supported("-Wno-unused-parameter").flag_if_supported("-Wuninitialized"); if env::var("CARGO_CFG_TARGET_OS").unwrap() != "linux" { - cc_builder.flag_if_supported("-Wbad-function-cast"); + cc.flag_if_supported("-Wbad-function-cast"); } 
- let flags = format!("{:?}", cc_builder.get_compiler().cflags_env()); - cc_builder.define("MDBX_BUILD_FLAGS", flags.as_str()).define("MDBX_TXN_CHECKOWNER", "0"); + let flags = format!("{:?}", cc.get_compiler().cflags_env()); + cc.define("MDBX_BUILD_FLAGS", flags.as_str()).define("MDBX_TXN_CHECKOWNER", "0"); // Enable debugging on debug builds #[cfg(debug_assertions)] - cc_builder.define("MDBX_DEBUG", "1").define("MDBX_ENABLE_PROFGC", "1"); + cc.define("MDBX_DEBUG", "1").define("MDBX_ENABLE_PROFGC", "1"); // Disables debug logging on optimized builds #[cfg(not(debug_assertions))] - cc_builder.define("MDBX_DEBUG", "0").define("NDEBUG", None); + cc.define("MDBX_DEBUG", "0").define("NDEBUG", None); // Propagate `-C target-cpu=native` let rustflags = env::var("CARGO_ENCODED_RUSTFLAGS").unwrap(); if rustflags.contains("target-cpu=native") && env::var("CARGO_CFG_TARGET_ENV").unwrap() != "msvc" { - cc_builder.flag("-march=native"); + cc.flag("-march=native"); } - cc_builder.file(mdbx.join("mdbx.c")).compile("libmdbx.a"); + cc.file(mdbx.join("mdbx.c")).compile("libmdbx.a"); +} + +fn generate_bindings(mdbx: &Path, out_file: &Path) { + use bindgen::{ + callbacks::{IntKind, ParseCallbacks}, + Formatter, + }; + + #[derive(Debug)] + struct Callbacks; + + impl ParseCallbacks for Callbacks { + fn int_macro(&self, name: &str, _value: i64) -> Option { + match name { + "MDBX_SUCCESS" | + "MDBX_KEYEXIST" | + "MDBX_NOTFOUND" | + "MDBX_PAGE_NOTFOUND" | + "MDBX_CORRUPTED" | + "MDBX_PANIC" | + "MDBX_VERSION_MISMATCH" | + "MDBX_INVALID" | + "MDBX_MAP_FULL" | + "MDBX_DBS_FULL" | + "MDBX_READERS_FULL" | + "MDBX_TLS_FULL" | + "MDBX_TXN_FULL" | + "MDBX_CURSOR_FULL" | + "MDBX_PAGE_FULL" | + "MDBX_MAP_RESIZED" | + "MDBX_INCOMPATIBLE" | + "MDBX_BAD_RSLOT" | + "MDBX_BAD_TXN" | + "MDBX_BAD_VALSIZE" | + "MDBX_BAD_DBI" | + "MDBX_LOG_DONTCHANGE" | + "MDBX_DBG_DONTCHANGE" | + "MDBX_RESULT_TRUE" | + "MDBX_UNABLE_EXTEND_MAPSIZE" | + "MDBX_PROBLEM" | + "MDBX_LAST_LMDB_ERRCODE" | + "MDBX_BUSY" | + 
"MDBX_EMULTIVAL" | + "MDBX_EBADSIGN" | + "MDBX_WANNA_RECOVERY" | + "MDBX_EKEYMISMATCH" | + "MDBX_TOO_LARGE" | + "MDBX_THREAD_MISMATCH" | + "MDBX_TXN_OVERLAPPING" | + "MDBX_LAST_ERRCODE" => Some(IntKind::Int), + _ => Some(IntKind::UInt), + } + } + } + + let bindings = bindgen::Builder::default() + .header(mdbx.join("mdbx.h").to_string_lossy()) + .allowlist_var("^(MDBX|mdbx)_.*") + .allowlist_type("^(MDBX|mdbx)_.*") + .allowlist_function("^(MDBX|mdbx)_.*") + .size_t_is_usize(true) + .merge_extern_blocks(true) + .parse_callbacks(Box::new(Callbacks)) + .layout_tests(false) + .prepend_enum_name(false) + .generate_comments(false) + .formatter(Formatter::Rustfmt) + .generate() + .expect("Unable to generate bindings"); + bindings.write_to_file(out_file).expect("Couldn't write bindings!"); } diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 30ea03834109..ca51ac816b28 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -11,8 +11,7 @@ use ffi::{ MDBX_NEXT_MULTIPLE, MDBX_NEXT_NODUP, MDBX_PREV, MDBX_PREV_DUP, MDBX_PREV_MULTIPLE, MDBX_PREV_NODUP, MDBX_SET, MDBX_SET_KEY, MDBX_SET_LOWERBOUND, MDBX_SET_RANGE, }; -use libc::c_void; -use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr}; +use std::{borrow::Cow, ffi::c_void, fmt, marker::PhantomData, mem, ptr}; /// A cursor for navigating the items within a database. pub struct Cursor diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 2cbd3d8aa1d7..b4360a2b87f1 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -201,8 +201,8 @@ impl Environment { /// Note: /// /// * MDBX stores all the freelists in the designated database 0 in each environment, and the - /// freelist count is stored at the beginning of the value as `libc::uint32_t` in the native - /// byte order. 
+ /// freelist count is stored at the beginning of the value as `uint32_t` in the native byte + /// order. /// /// * It will create a read transaction to traverse the freelist database. pub fn freelist(&self) -> Result { diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs index 20a101153899..1df5a397b2de 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -1,5 +1,4 @@ -use libc::c_int; -use std::result; +use std::{ffi::c_int, result}; /// An MDBX result. pub type Result = result::Result; diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 77838e111b95..f6ab5ffb2a0d 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -8,9 +8,9 @@ use crate::{ }; use ffi::{mdbx_txn_renew, MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; use indexmap::IndexSet; -use libc::{c_uint, c_void}; use parking_lot::{Mutex, MutexGuard}; use std::{ + ffi::{c_uint, c_void}, fmt::{self, Debug}, mem::size_of, ptr, slice, diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml index fb485e32ac9c..dcc916e783f8 100644 --- a/crates/storage/nippy-jar/Cargo.toml +++ b/crates/storage/nippy-jar/Cargo.toml @@ -32,7 +32,7 @@ lz4_flex = { version = "0.11", default-features = false } # offsets sucds = "~0.8" -memmap2 = "0.7.1" +memmap2 = "0.9.4" bincode = "1.3" serde = { workspace = true, features = ["derive"] } tracing.workspace = true diff --git a/crates/storage/nippy-jar/src/compression/mod.rs b/crates/storage/nippy-jar/src/compression/mod.rs index 76e8c6d16b69..28a92fe909f2 100644 --- a/crates/storage/nippy-jar/src/compression/mod.rs +++ b/crates/storage/nippy-jar/src/compression/mod.rs @@ -30,6 +30,7 @@ pub trait Compression: Serialize + for<'a> Deserialize<'a> { true } + #[cfg(test)] /// If required, prepares compression algorithm with an early pass on the data. 
fn prepare_compression( &mut self, @@ -95,6 +96,7 @@ impl Compression for Compressors { } } + #[cfg(test)] fn prepare_compression( &mut self, columns: Vec>>, diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index 8194538e42f8..e805f187839f 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -185,6 +185,7 @@ impl Compression for Zstd { matches!(self.state, ZstdState::Ready) } + #[cfg(test)] /// If using it with dictionaries, prepares a dictionary for each column. fn prepare_compression( &mut self, @@ -208,7 +209,6 @@ impl Compression for Zstd { return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len())); } - // TODO: parallel calculation let mut dictionaries = vec![]; for column in columns { // ZSTD requires all training data to be continuous in memory, alongside the size of @@ -273,6 +273,7 @@ impl<'a> std::fmt::Debug for ZstdDictionaries<'a> { } impl<'a> ZstdDictionaries<'a> { + #[cfg(test)] /// Creates [`ZstdDictionaries`]. pub(crate) fn new(raw: Vec) -> Self { Self(raw.into_iter().map(ZstdDictionary::Raw).collect()) @@ -315,6 +316,7 @@ impl<'a> ZstdDictionaries<'a> { /// A Zstd dictionary. It's created and serialized with [`ZstdDictionary::Raw`], and deserialized as /// [`ZstdDictionary::Loaded`]. pub(crate) enum ZstdDictionary<'a> { + #[allow(dead_code)] Raw(RawDictionary), Loaded(DecoderDictionary<'a>), } diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index be7fc829e379..5942c0442780 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -67,7 +67,7 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { self.row = 0; } - /// Returns a row, searching it by a key used during [`NippyJar::prepare_index`]. + /// Returns a row, searching it by a key. 
/// /// **May return false positives.** /// @@ -130,7 +130,7 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { )) } - /// Returns a row, searching it by a key used during [`NippyJar::prepare_index`] by using a + /// Returns a row, searching it by a key using a /// `mask` to only read certain columns from the row. /// /// **May return false positives.** diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index 58e27a76b4c5..225d4fba30fb 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -24,7 +24,7 @@ pub enum NippyJarError { #[error("unexpected missing value: row:col {0}:{1}")] UnexpectedMissingValue(u64, u64), #[error(transparent)] - FilterError(#[from] cuckoofilter::CuckooError), + EthFilterError(#[from] cuckoofilter::CuckooError), #[error("nippy jar initialized without filter")] FilterMissing, #[error("filter has reached max capacity")] @@ -42,6 +42,11 @@ pub enum NippyJarError { /// The read offset size in number of bytes. offset_size: u8, }, + #[error("the size of an offset must be at least 1 byte, got {offset_size}")] + OffsetSizeTooSmall { + /// The read offset size in number of bytes. + offset_size: u8, + }, #[error("attempted to read an out of bounds offset: {index}")] OffsetOutOfBounds { /// The index of the offset that was being read. diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 7f0995dae3db..241a46eb4f28 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -28,7 +28,9 @@ pub mod filter; use filter::{Cuckoo, InclusionFilter, InclusionFilters}; pub mod compression; -use compression::{Compression, Compressors}; +#[cfg(test)] +use compression::Compression; +use compression::Compressors; pub mod phf; pub use phf::PHFKey; @@ -306,6 +308,56 @@ impl NippyJar { DataReader::new(self.data_path()) } + /// Writes all necessary configuration to file. 
+ fn freeze_config(&self) -> Result<(), NippyJarError> { + // Atomic writes are hard: + let mut tmp_path = self.config_path(); + tmp_path.set_extension(".tmp"); + + // Write to temporary file + let mut file = File::create(&tmp_path)?; + bincode::serialize_into(&mut file, &self)?; + + // fsync() file + file.sync_all()?; + + // Rename file, not move + reth_fs_util::rename(&tmp_path, self.config_path())?; + + // fsync() dir + if let Some(parent) = tmp_path.parent() { + OpenOptions::new().read(true).open(parent)?.sync_all()?; + } + Ok(()) + } +} + +impl InclusionFilter for NippyJar { + fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { + self.filter.as_mut().ok_or(NippyJarError::FilterMissing)?.add(element) + } + + fn contains(&self, element: &[u8]) -> Result { + self.filter.as_ref().ok_or(NippyJarError::FilterMissing)?.contains(element) + } + + fn size(&self) -> usize { + self.filter.as_ref().map(|f| f.size()).unwrap_or(0) + } +} + +impl PerfectHashingFunction for NippyJar { + fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError> { + self.phf.as_mut().ok_or(NippyJarError::PHFMissing)?.set_keys(keys) + } + + fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { + self.phf.as_ref().ok_or(NippyJarError::PHFMissing)?.get_index(key) + } +} + +#[cfg(test)] +impl NippyJar { /// If required, prepares any compression algorithm to an early pass of the data. pub fn prepare_compression( &mut self, @@ -429,53 +481,6 @@ impl NippyJar { Ok(()) } - - /// Writes all necessary configuration to file. 
- fn freeze_config(&self) -> Result<(), NippyJarError> { - // Atomic writes are hard: - let mut tmp_path = self.config_path(); - tmp_path.set_extension(".tmp"); - - // Write to temporary file - let mut file = File::create(&tmp_path)?; - bincode::serialize_into(&mut file, &self)?; - - // fsync() file - file.sync_all()?; - - // Rename file, not move - reth_fs_util::rename(&tmp_path, self.config_path())?; - - // fsync() dir - if let Some(parent) = tmp_path.parent() { - OpenOptions::new().read(true).open(parent)?.sync_all()?; - } - Ok(()) - } -} - -impl InclusionFilter for NippyJar { - fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { - self.filter.as_mut().ok_or(NippyJarError::FilterMissing)?.add(element) - } - - fn contains(&self, element: &[u8]) -> Result { - self.filter.as_ref().ok_or(NippyJarError::FilterMissing)?.contains(element) - } - - fn size(&self) -> usize { - self.filter.as_ref().map(|f| f.size()).unwrap_or(0) - } -} - -impl PerfectHashingFunction for NippyJar { - fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError> { - self.phf.as_mut().ok_or(NippyJarError::PHFMissing)?.set_keys(keys) - } - - fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { - self.phf.as_ref().ok_or(NippyJarError::PHFMissing)?.get_index(key) - } } /// Manages the reading of static file data using memory-mapped files. @@ -513,7 +518,9 @@ impl DataReader { // Ensure that the size of an offset is at most 8 bytes. 
if offset_size > 8 { - return Err(NippyJarError::OffsetSizeTooBig { offset_size }); + return Err(NippyJarError::OffsetSizeTooBig { offset_size }) + } else if offset_size == 0 { + return Err(NippyJarError::OffsetSizeTooSmall { offset_size }) } Ok(Self { data_file, data_mmap, offset_file, offset_size, offset_mmap }) @@ -551,7 +558,7 @@ impl DataReader { fn offset_at(&self, index: usize) -> Result { let mut buffer: [u8; 8] = [0; 8]; - let offset_end = index + self.offset_size as usize; + let offset_end = index.saturating_add(self.offset_size as usize); if offset_end > self.offset_mmap.len() { return Err(NippyJarError::OffsetOutOfBounds { index }); } @@ -579,6 +586,7 @@ impl DataReader { #[cfg(test)] mod tests { use super::*; + use compression::Compression; use rand::{rngs::SmallRng, seq::SliceRandom, RngCore, SeedableRng}; use std::{collections::HashSet, fs::OpenOptions}; diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 05d7e2d61a7e..44b3bd5ed152 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -130,10 +130,19 @@ impl NippyJarWriter { } let mut offsets_file = OpenOptions::new().read(true).write(true).open(offsets)?; + if is_created { + let mut buf = Vec::with_capacity(1 + OFFSET_SIZE_BYTES as usize); - // First byte of the offset file is the size of one offset in bytes - offsets_file.write_all(&[OFFSET_SIZE_BYTES])?; - offsets_file.seek(SeekFrom::End(0))?; + // First byte of the offset file is the size of one offset in bytes + buf.write_all(&[OFFSET_SIZE_BYTES])?; + + // The last offset should always represent the data file len, which is 0 on + // creation. 
+ buf.write_all(&[0; OFFSET_SIZE_BYTES as usize])?; + + offsets_file.write_all(&buf)?; + offsets_file.seek(SeekFrom::End(0))?; + } Ok((data_file, offsets_file, is_created)) } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index de2fea7578d9..6cf456665bdf 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives.workspace = true @@ -72,5 +73,6 @@ assert_matches.workspace = true rand.workspace = true [features] -test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils"] optimism = ["reth-primitives/optimism", "reth-execution-types/optimism"] +serde = ["reth-execution-types/serde"] +test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils"] diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/execution_outcome.rs similarity index 96% rename from crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs rename to crates/storage/provider/src/bundle_state/execution_outcome.rs index 81198481e26b..009076d0ae9d 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/execution_outcome.rs @@ -42,12 +42,14 @@ impl StateWriter for ExecutionOutcome { if let Some(static_file_producer) = &mut static_file_producer { // Increment block on static file header. 
static_file_producer.increment_block(StaticFileSegment::Receipts, block_number)?; - - for (tx_idx, receipt) in receipts.into_iter().enumerate() { - let receipt = receipt - .expect("receipt should not be filtered when saving to static files."); - static_file_producer.append_receipt(first_tx_index + tx_idx as u64, receipt)?; - } + let receipts = receipts.into_iter().enumerate().map(|(tx_idx, receipt)| { + Ok(( + first_tx_index + tx_idx as u64, + receipt + .expect("receipt should not be filtered when saving to static files."), + )) + }); + static_file_producer.append_receipts(receipts)?; } else if !receipts.is_empty() { for (tx_idx, receipt) in receipts.into_iter().enumerate() { if let Some(receipt) = receipt { @@ -74,9 +76,7 @@ mod tests { models::{AccountBeforeTx, BlockNumberAddress}, }; use reth_primitives::{ - keccak256, - revm::compat::{into_reth_acc, into_revm_acc}, - Account, Address, Receipt, Receipts, StorageEntry, B256, U256, + keccak256, Account, Address, Receipt, Receipts, StorageEntry, B256, U256, }; use reth_trie::{test_utils::state_root, StateRoot}; use revm::{ @@ -147,9 +147,9 @@ mod tests { .write_to_db(provider.tx_ref(), 1) .expect("Could not write reverts to DB"); - let reth_account_a = into_reth_acc(account_a); - let reth_account_b = into_reth_acc(account_b); - let reth_account_b_changed = into_reth_acc(account_b_changed.clone()); + let reth_account_a = account_a.into(); + let reth_account_b = account_b.into(); + let reth_account_b_changed = account_b_changed.clone().into(); // Check plain state assert_eq!( @@ -280,6 +280,7 @@ mod tests { EvmStorageSlot { present_value: U256::from(2), original_value: U256::from(1), + ..Default::default() }, )]), }, @@ -470,7 +471,11 @@ mod tests { // 0x00 => 1 => 2 storage: HashMap::from([( U256::ZERO, - EvmStorageSlot { original_value: U256::from(1), present_value: U256::from(2) }, + EvmStorageSlot { + original_value: U256::from(1), + present_value: U256::from(2), + ..Default::default() + }, )]), }, )])); @@ 
-887,7 +892,7 @@ mod tests { } let (_, updates) = StateRoot::from_tx(tx).root_with_updates().unwrap(); - updates.flush(tx).unwrap(); + updates.write_to_database(tx).unwrap(); }) .unwrap(); @@ -919,7 +924,7 @@ mod tests { // destroy account 1 let address1 = Address::with_last_byte(1); let account1_old = prestate.remove(&address1).unwrap(); - state.insert_account(address1, into_revm_acc(account1_old.0)); + state.insert_account(address1, account1_old.0.into()); state.commit(HashMap::from([( address1, RevmAccount { @@ -939,7 +944,7 @@ mod tests { let account2_slot2_old_value = *account2.1.get(&slot2_key).unwrap(); state.insert_account_with_storage( address2, - into_revm_acc(account2.0), + account2.0.into(), HashMap::from([(slot2, account2_slot2_old_value)]), ); @@ -949,7 +954,7 @@ mod tests { address2, RevmAccount { status: AccountStatus::Touched, - info: into_revm_acc(account2.0), + info: account2.0.into(), storage: HashMap::from_iter([( slot2, EvmStorageSlot::new_changed(account2_slot2_old_value, account2_slot2_new_value), @@ -962,14 +967,14 @@ mod tests { // change balance of account 3 let address3 = Address::with_last_byte(3); let account3 = prestate.get_mut(&address3).unwrap(); - state.insert_account(address3, into_revm_acc(account3.0)); + state.insert_account(address3, account3.0.into()); account3.0.balance = U256::from(24); state.commit(HashMap::from([( address3, RevmAccount { status: AccountStatus::Touched, - info: into_revm_acc(account3.0), + info: account3.0.into(), storage: HashMap::default(), }, )])); @@ -979,14 +984,14 @@ mod tests { // change nonce of account 4 let address4 = Address::with_last_byte(4); let account4 = prestate.get_mut(&address4).unwrap(); - state.insert_account(address4, into_revm_acc(account4.0)); + state.insert_account(address4, account4.0.into()); account4.0.nonce = 128; state.commit(HashMap::from([( address4, RevmAccount { status: AccountStatus::Touched, - info: into_revm_acc(account4.0), + info: account4.0.into(), storage: 
HashMap::default(), }, )])); @@ -1001,7 +1006,7 @@ mod tests { address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::Created, - info: into_revm_acc(account1_new), + info: account1_new.into(), storage: HashMap::default(), }, )])); @@ -1017,7 +1022,7 @@ mod tests { address1, RevmAccount { status: AccountStatus::Touched | AccountStatus::Created, - info: into_revm_acc(account1_new), + info: account1_new.into(), storage: HashMap::from_iter([( slot20, EvmStorageSlot::new_changed(U256::ZERO, account1_slot20_value), diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs index 1b3965a14a45..eaf3dab43ee9 100644 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ b/crates/storage/provider/src/bundle_state/mod.rs @@ -1,13 +1,12 @@ //! Bundle state module. //! This module contains all the logic related to bundle state. -mod bundle_state_with_receipts; + +mod execution_outcome; mod hashed_state_changes; mod state_changes; mod state_reverts; -pub use bundle_state_with_receipts::{ - AccountRevertInit, BundleStateInit, ExecutionOutcome, OriginalValuesKnown, RevertsInit, -}; +pub use execution_outcome::{AccountRevertInit, BundleStateInit, OriginalValuesKnown, RevertsInit}; pub use hashed_state_changes::HashedStateChanges; pub use state_changes::StateChanges; pub use state_reverts::{StateReverts, StorageRevertsIter}; diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs index 0587f36933f2..57c3b837f3e0 100644 --- a/crates/storage/provider/src/bundle_state/state_changes.rs +++ b/crates/storage/provider/src/bundle_state/state_changes.rs @@ -4,7 +4,7 @@ use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{revm::compat::into_reth_acc, Bytecode, StorageEntry, U256}; +use reth_primitives::{Bytecode, StorageEntry, U256}; use 
reth_storage_errors::db::DatabaseError; use revm::db::states::{PlainStorageChangeset, StateChangeset}; @@ -34,7 +34,7 @@ impl StateChanges { for (address, account) in self.0.accounts { if let Some(account) = account { tracing::trace!(target: "provider::bundle_state", ?address, "Updating plain state account"); - accounts_cursor.upsert(address, into_reth_acc(account))?; + accounts_cursor.upsert(address, account.into())?; } else if accounts_cursor.seek_exact(address)?.is_some() { tracing::trace!(target: "provider::bundle_state", ?address, "Deleting plain state account"); accounts_cursor.delete_current()?; diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 3736b5148be8..d65fcaa829f1 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -5,7 +5,7 @@ use reth_db_api::{ models::{AccountBeforeTx, BlockNumberAddress}, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{revm::compat::into_reth_acc, BlockNumber, StorageEntry, B256, U256}; +use reth_primitives::{BlockNumber, StorageEntry, B256, U256}; use reth_storage_errors::db::DatabaseError; use revm::db::states::{PlainStateReverts, PlainStorageRevert, RevertToSlot}; use std::iter::Peekable; @@ -82,7 +82,7 @@ impl StateReverts { for (address, info) in account_block_reverts { account_changeset_cursor.append_dup( block_number, - AccountBeforeTx { address, info: info.map(into_reth_acc) }, + AccountBeforeTx { address, info: info.map(Into::into) }, )?; } } diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index e0bbe2b72b39..a30db07d8628 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -1,9 +1,10 @@ use crate::{ AccountReader, BlockHashReader, ExecutionDataProvider, 
StateProvider, StateRootProvider, }; -use reth_primitives::{proofs::AccountProof, Account, Address, BlockNumber, Bytecode, B256}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_trie::updates::TrieUpdates; +use reth_primitives::{Account, Address, BlockNumber, Bytecode, B256}; +use reth_storage_api::StateProofProvider; +use reth_storage_errors::provider::ProviderResult; +use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::db::BundleState; /// A state provider that resolves to data from either a wrapped [`crate::ExecutionOutcome`] @@ -80,6 +81,21 @@ impl StateRootProvider } } +impl StateProofProvider + for BundleStateProvider +{ + fn proof( + &self, + bundle_state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult { + let mut state = self.block_execution_data_provider.execution_outcome().state().clone(); + state.extend(bundle_state.clone()); + self.state_provider.proof(&state, address, slots) + } +} + impl StateProvider for BundleStateProvider { fn storage( &self, @@ -107,8 +123,4 @@ impl StateProvider for BundleStat self.state_provider.bytecode_by_hash(code_hash) } - - fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { - Err(ProviderError::StateRootNotAvailableForHistoricalBlock) - } } diff --git a/crates/storage/provider/src/providers/chain_info.rs b/crates/storage/provider/src/providers/chain_info.rs index 905be1287ee5..c696fefea9e3 100644 --- a/crates/storage/provider/src/providers/chain_info.rs +++ b/crates/storage/provider/src/providers/chain_info.rs @@ -1,5 +1,6 @@ use parking_lot::RwLock; -use reth_primitives::{BlockNumHash, BlockNumber, ChainInfo, SealedHeader}; +use reth_chainspec::ChainInfo; +use reth_primitives::{BlockNumHash, BlockNumber, SealedHeader}; use std::{ sync::{ atomic::{AtomicU64, Ordering}, @@ -7,6 +8,7 @@ use std::{ }, time::Instant, }; +use tokio::sync::watch; /// Tracks the chain info: canonical head, safe block, finalized block. 
#[derive(Debug, Clone)] @@ -17,14 +19,16 @@ pub(crate) struct ChainInfoTracker { impl ChainInfoTracker { /// Create a new chain info container for the given canonical head. pub(crate) fn new(head: SealedHeader) -> Self { + let (finalized_block, _) = watch::channel(None); + let (safe_block, _) = watch::channel(None); Self { inner: Arc::new(ChainInfoInner { last_forkchoice_update: RwLock::new(None), last_transition_configuration_exchange: RwLock::new(None), canonical_head_number: AtomicU64::new(head.number), canonical_head: RwLock::new(head), - safe_block: RwLock::new(None), - finalized_block: RwLock::new(None), + safe_block, + finalized_block, }), } } @@ -62,12 +66,12 @@ impl ChainInfoTracker { /// Returns the safe header of the chain. pub(crate) fn get_safe_header(&self) -> Option { - self.inner.safe_block.read().clone() + self.inner.safe_block.borrow().clone() } /// Returns the finalized header of the chain. pub(crate) fn get_finalized_header(&self) -> Option { - self.inner.finalized_block.read().clone() + self.inner.finalized_block.borrow().clone() } /// Returns the canonical head of the chain. @@ -84,14 +88,14 @@ impl ChainInfoTracker { /// Returns the safe header of the chain. #[allow(dead_code)] pub(crate) fn get_safe_num_hash(&self) -> Option { - let h = self.inner.safe_block.read(); + let h = self.inner.safe_block.borrow(); h.as_ref().map(|h| h.num_hash()) } /// Returns the finalized header of the chain. #[allow(dead_code)] pub(crate) fn get_finalized_num_hash(&self) -> Option { - let h = self.inner.finalized_block.read(); + let h = self.inner.finalized_block.borrow(); h.as_ref().map(|h| h.num_hash()) } @@ -106,12 +110,16 @@ impl ChainInfoTracker { /// Sets the safe header of the chain. pub(crate) fn set_safe(&self, header: SealedHeader) { - self.inner.safe_block.write().replace(header); + self.inner.safe_block.send_modify(|h| { + let _ = h.replace(header); + }); } /// Sets the finalized header of the chain. 
pub(crate) fn set_finalized(&self, header: SealedHeader) { - self.inner.finalized_block.write().replace(header); + self.inner.finalized_block.send_modify(|h| { + let _ = h.replace(header); + }); } } @@ -131,7 +139,7 @@ struct ChainInfoInner { /// The canonical head of the chain. canonical_head: RwLock, /// The block that the beacon node considers safe. - safe_block: RwLock>, + safe_block: watch::Sender>, /// The block that the beacon node considers finalized. - finalized_block: RwLock>, + finalized_block: watch::Sender>, } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 23d59c86d7c4..c32207b10e19 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -3,22 +3,22 @@ use crate::{ to_range, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, - EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, - ProviderError, PruneCheckpointReader, RequestsProvider, StageCheckpointReader, - StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, - WithdrawalsProvider, + EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, ProviderError, + PruneCheckpointReader, RequestsProvider, StageCheckpointReader, StateProviderBox, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, - ChainSpec, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, 
TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, - TxNumber, Withdrawal, Withdrawals, B256, U256, + Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Header, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, + TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, + U256, }; -use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -27,6 +27,7 @@ use std::{ path::Path, sync::Arc, }; +use tokio::sync::watch; use tracing::trace; mod metrics; @@ -45,6 +46,8 @@ pub struct ProviderFactory { chain_spec: Arc, /// Static File Provider static_file_provider: StaticFileProvider, + /// Optional pruning configuration + prune_modes: PruneModes, } impl ProviderFactory { @@ -54,7 +57,7 @@ impl ProviderFactory { chain_spec: Arc, static_file_provider: StaticFileProvider, ) -> Self { - Self { db: Arc::new(db), chain_spec, static_file_provider } + Self { db: Arc::new(db), chain_spec, static_file_provider, prune_modes: PruneModes::none() } } /// Enables metrics on the static file provider. @@ -63,6 +66,12 @@ impl ProviderFactory { self } + /// Sets the pruning configuration for an existing [`ProviderFactory`]. + pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + self.prune_modes = prune_modes; + self + } + /// Returns reference to the underlying database. 
pub fn db_ref(&self) -> &DB { &self.db @@ -88,6 +97,7 @@ impl ProviderFactory { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, + prune_modes: PruneModes::none(), }) } } @@ -96,12 +106,16 @@ impl ProviderFactory { /// Returns a provider with a created `DbTx` inside, which allows fetching data from the /// database using different types of providers. Example: [`HeaderProvider`] /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. + /// + /// This sets the [`PruneModes`] to [`None`], because they should only be relevant for writing + /// data. #[track_caller] pub fn provider(&self) -> ProviderResult> { Ok(DatabaseProvider::new( self.db.tx()?, self.chain_spec.clone(), self.static_file_provider.clone(), + self.prune_modes.clone(), )) } @@ -115,6 +129,7 @@ impl ProviderFactory { self.db.tx_mut()?, self.chain_spec.clone(), self.static_file_provider.clone(), + self.prune_modes.clone(), ))) } @@ -165,10 +180,10 @@ impl StaticFileProviderFactory for ProviderFactory { impl HeaderSyncGapProvider for ProviderFactory { fn sync_gap( &self, - mode: HeaderSyncMode, + tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, ) -> ProviderResult { - self.provider()?.sync_gap(mode, highest_uninterrupted_block) + self.provider()?.sync_gap(tip, highest_uninterrupted_block) } } @@ -329,6 +344,14 @@ impl BlockReader for ProviderFactory { self.provider()?.block_with_senders(id, transaction_kind) } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.provider()?.sealed_block_with_senders(id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.provider()?.block_range(range) } @@ -339,6 +362,13 @@ impl BlockReader for ProviderFactory { ) -> ProviderResult> { self.provider()?.block_with_senders_range(range) } + + fn sealed_block_with_senders_range( + &self, + range: 
RangeInclusive, + ) -> ProviderResult> { + self.provider()?.sealed_block_with_senders_range(range) + } } impl TransactionsProvider for ProviderFactory { @@ -511,22 +541,6 @@ impl EvmEnvProvider for ProviderFactory { self.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } - fn fill_block_env_at( - &self, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - ) -> ProviderResult<()> { - self.provider()?.fill_block_env_at(block_env, at) - } - - fn fill_block_env_with_header( - &self, - block_env: &mut BlockEnv, - header: &Header, - ) -> ProviderResult<()> { - self.provider()?.fill_block_env_with_header(block_env, header) - } - fn fill_cfg_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -576,6 +590,7 @@ impl Clone for ProviderFactory { db: Arc::clone(&self.db), chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), + prune_modes: self.prune_modes.clone(), } } } @@ -585,20 +600,18 @@ mod tests { use crate::{ providers::{StaticFileProvider, StaticFileWriter}, test_utils::create_test_provider_factory, - BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, HeaderSyncMode, - TransactionsProvider, + BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, TransactionsProvider, }; use alloy_rlp::Decodable; use assert_matches::assert_matches; use rand::Rng; + use reth_chainspec::ChainSpecBuilder; use reth_db::{ mdbx::DatabaseArguments, tables, test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; - use reth_primitives::{ - hex_literal::hex, ChainSpecBuilder, SealedBlock, StaticFileSegment, TxNumber, B256, U256, - }; + use reth_primitives::{hex_literal::hex, SealedBlock, StaticFileSegment, TxNumber, B256, U256}; use reth_prune_types::{PruneMode, PruneModes}; use reth_storage_errors::provider::ProviderError; use reth_testing_utils::{ @@ -663,7 +676,7 @@ mod tests { { let provider = factory.provider_rw().unwrap(); assert_matches!( - 
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None), + provider.insert_block(block.clone().try_seal_with_senders().unwrap()), Ok(_) ); assert_matches!( @@ -674,16 +687,14 @@ mod tests { } { - let provider = factory.provider_rw().unwrap(); + let prune_modes = PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: Some(PruneMode::Full), + ..PruneModes::none() + }; + let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); assert_matches!( - provider.insert_block( - block.clone().try_seal_with_senders().unwrap(), - Some(&PruneModes { - sender_recovery: Some(PruneMode::Full), - transaction_lookup: Some(PruneMode::Full), - ..PruneModes::none() - }) - ), + provider.insert_block(block.clone().try_seal_with_senders().unwrap(),), Ok(_) ); assert_matches!(provider.transaction_sender(0), Ok(None)); @@ -703,7 +714,7 @@ mod tests { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None), + provider.insert_block(block.clone().try_seal_with_senders().unwrap()), Ok(_) ); @@ -741,7 +752,6 @@ mod tests { let mut rng = generators::rng(); let consensus_tip = rng.gen(); let (_tip_tx, tip_rx) = watch::channel(consensus_tip); - let mode = HeaderSyncMode::Tip(tip_rx); // Genesis let checkpoint = 0; @@ -749,7 +759,7 @@ mod tests { // Empty database assert_matches!( - provider.sync_gap(mode.clone(), checkpoint), + provider.sync_gap(tip_rx.clone(), checkpoint), Err(ProviderError::HeaderNotFound(block_number)) if block_number.as_number().unwrap() == checkpoint ); @@ -761,7 +771,7 @@ mod tests { static_file_writer.commit().unwrap(); drop(static_file_writer); - let gap = provider.sync_gap(mode, checkpoint).unwrap(); + let gap = provider.sync_gap(tip_rx, checkpoint).unwrap(); assert_eq!(gap.local_head, head); assert_eq!(gap.target.tip(), consensus_tip.into()); } diff --git a/crates/storage/provider/src/providers/database/provider.rs 
b/crates/storage/provider/src/providers/database/provider.rs index 1ad3d285d945..fe979d5a5ba0 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,19 +1,20 @@ use crate::{ - bundle_state::{BundleStateInit, ExecutionOutcome, HashedStateChanges, RevertsInit}, + bundle_state::{BundleStateInit, HashedStateChanges, RevertsInit}, providers::{database::metrics, static_file::StaticFileWriter, StaticFileProvider}, to_range, traits::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, - Chain, EvmEnvProvider, FinalizedBlockReader, FinalizedBlockWriter, HashingWriter, - HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, HistoricalStateProvider, - HistoryWriter, LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, + EvmEnvProvider, FinalizedBlockReader, FinalizedBlockWriter, HashingWriter, HeaderProvider, + HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, HistoryWriter, + LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, StageCheckpointReader, StateProviderBox, StateWriter, StatsReader, StorageReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; use itertools::{izip, Itertools}; +use reth_chainspec::{ChainInfo, ChainSpec, EthereumHardforks}; use reth_db::{tables, BlockNumberList}; use reth_db_api::{ common::KeyValue, @@ -28,15 +29,14 @@ use reth_db_api::{ DatabaseError, }; use reth_evm::ConfigureEvmEnv; +use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::{ - keccak256, - revm::{config::revm_spec, env::fill_block_env}, - Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, 
- ChainInfo, ChainSpec, GotExpected, Head, Header, Receipt, Requests, SealedBlock, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, - TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, - Withdrawal, Withdrawals, B256, U256, + keccak256, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, + BlockWithSenders, GotExpected, Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, + SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, + Withdrawals, B256, U256, }; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -46,7 +46,7 @@ use reth_trie::{ updates::TrieUpdates, HashedPostState, Nibbles, StateRoot, }; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ cmp::Ordering, collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, @@ -55,6 +55,7 @@ use std::{ sync::{mpsc, Arc}, time::{Duration, Instant}, }; +use tokio::sync::watch; use tracing::{debug, error, warn}; /// A [`DatabaseProvider`] that holds a read-only database transaction. @@ -103,6 +104,8 @@ pub struct DatabaseProvider { chain_spec: Arc, /// Static File provider static_file_provider: StaticFileProvider, + /// Pruning configuration + prune_modes: PruneModes, } impl DatabaseProvider { @@ -114,52 +117,13 @@ impl DatabaseProvider { impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. - pub fn new_rw( + pub const fn new_rw( tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, + prune_modes: PruneModes, ) -> Self { - Self { tx, chain_spec, static_file_provider } - } -} - -impl DatabaseProvider { - /// Iterates over read only values in the given table and collects them into a vector. 
- /// - /// Early-returns if the range is empty, without opening a cursor transaction. - fn cursor_read_collect>( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - let capacity = match range_size_hint(&range) { - Some(0) | None => return Ok(Vec::new()), - Some(capacity) => capacity, - }; - let mut cursor = self.tx.cursor_read::()?; - self.cursor_collect_with_capacity(&mut cursor, range, capacity) - } - - /// Iterates over read only values in the given table and collects them into a vector. - fn cursor_collect>( - &self, - cursor: &mut impl DbCursorRO, - range: impl RangeBounds, - ) -> ProviderResult> { - let capacity = range_size_hint(&range).unwrap_or(0); - self.cursor_collect_with_capacity(cursor, range, capacity) - } - - fn cursor_collect_with_capacity>( - &self, - cursor: &mut impl DbCursorRO, - range: impl RangeBounds, - capacity: usize, - ) -> ProviderResult> { - let mut items = Vec::with_capacity(capacity); - for entry in cursor.walk_range(range)? { - items.push(entry?.1); - } - Ok(items) + Self { tx, chain_spec, static_file_provider, prune_modes } } } @@ -208,12 +172,12 @@ impl DatabaseProvider { } impl DatabaseProvider { - #[cfg(any(test, feature = "test-utils"))] - /// Inserts an historical block. Used for setting up test environments + // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. + // #[cfg(any(test, feature = "test-utils"))] + /// Inserts an historical block. **Used for setting up test environments** pub fn insert_historical_block( &self, block: SealedBlockWithSenders, - prune_modes: Option<&PruneModes>, ) -> ProviderResult { let ttd = if block.number == 0 { block.difficulty @@ -237,7 +201,7 @@ impl DatabaseProvider { writer.append_header(block.header.as_ref().clone(), ttd, block.hash())?; - self.insert_block(block, prune_modes) + self.insert_block(block) } } @@ -291,12 +255,13 @@ where impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. 
- pub fn new( + pub const fn new( tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, + prune_modes: PruneModes, ) -> Self { - Self { tx, chain_spec, static_file_provider } + Self { tx, chain_spec, static_file_provider, prune_modes } } /// Consume `DbTx` or `DbTxMut`. @@ -314,6 +279,22 @@ impl DatabaseProvider { &self.tx } + /// Returns a reference to the [`ChainSpec`]. + pub fn chain_spec(&self) -> &ChainSpec { + &self.chain_spec + } + + /// Disables long-lived read transaction safety guarantees for leaks prevention and + /// observability improvements. + /// + /// CAUTION: In most of the cases, you want the safety guarantees for long read transactions + /// enabled. Use this only if you're sure that no write transaction is open in parallel, meaning + /// that Reth as a node is offline and not progressing. + pub fn disable_long_read_transaction_safety(mut self) -> Self { + self.tx.disable_long_read_transaction_safety(); + self + } + /// Return full table as Vec pub fn table(&self) -> Result>, DatabaseError> where @@ -325,15 +306,42 @@ impl DatabaseProvider { .collect::, DatabaseError>>() } - /// Disables long-lived read transaction safety guarantees for leaks prevention and - /// observability improvements. + /// Iterates over read only values in the given table and collects them into a vector. /// - /// CAUTION: In most of the cases, you want the safety guarantees for long read transactions - /// enabled. Use this only if you're sure that no write transaction is open in parallel, meaning - /// that Reth as a node is offline and not progressing. - pub fn disable_long_read_transaction_safety(mut self) -> Self { - self.tx.disable_long_read_transaction_safety(); - self + /// Early-returns if the range is empty, without opening a cursor transaction. 
+ fn cursor_read_collect>( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + let capacity = match range_size_hint(&range) { + Some(0) | None => return Ok(Vec::new()), + Some(capacity) => capacity, + }; + let mut cursor = self.tx.cursor_read::()?; + self.cursor_collect_with_capacity(&mut cursor, range, capacity) + } + + /// Iterates over read only values in the given table and collects them into a vector. + fn cursor_collect>( + &self, + cursor: &mut impl DbCursorRO, + range: impl RangeBounds, + ) -> ProviderResult> { + let capacity = range_size_hint(&range).unwrap_or(0); + self.cursor_collect_with_capacity(cursor, range, capacity) + } + + fn cursor_collect_with_capacity>( + &self, + cursor: &mut impl DbCursorRO, + range: impl RangeBounds, + capacity: usize, + ) -> ProviderResult> { + let mut items = Vec::with_capacity(capacity); + for entry in cursor.walk_range(range)? { + items.push(entry?.1); + } + Ok(items) } fn transactions_by_tx_range_with_cursor( @@ -353,9 +361,221 @@ impl DatabaseProvider { ) } - /// Returns a reference to the [`ChainSpec`]. - pub fn chain_spec(&self) -> &ChainSpec { - &self.chain_spec + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + header_by_number: HF, + construct_block: BF, + ) -> ProviderResult> + where + H: AsRef
, + HF: FnOnce(BlockNumber) -> ProviderResult>, + BF: FnOnce( + H, + Vec, + Vec
, + Vec
, + Option, + Option, + ) -> ProviderResult>, + { + let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; + let Some(header) = header_by_number(block_number)? else { return Ok(None) }; + + let ommers = self.ommers(block_number.into())?.unwrap_or_default(); + let withdrawals = + self.withdrawals_by_block(block_number.into(), header.as_ref().timestamp)?; + let requests = self.requests_by_block(block_number.into(), header.as_ref().timestamp)?; + + // Get the block body + // + // If the body indices are not found, this means that the transactions either do not exist + // in the database yet, or they do exit but are not indexed. If they exist but are not + // indexed, we don't have enough information to return the block anyways, so we return + // `None`. + let Some(body) = self.block_body_indices(block_number)? else { return Ok(None) }; + + let tx_range = body.tx_num_range(); + + let (transactions, senders) = if tx_range.is_empty() { + (vec![], vec![]) + } else { + (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) + }; + + let body = transactions + .into_iter() + .map(|tx| match transaction_kind { + TransactionVariant::NoHash => TransactionSigned { + // Caller explicitly asked for no hash, so we don't calculate it + hash: B256::ZERO, + signature: tx.signature, + transaction: tx.transaction, + }, + TransactionVariant::WithHash => tx.with_hash(), + }) + .collect(); + + construct_block(header, body, senders, ommers, withdrawals, requests) + } + + /// Returns a range of blocks from the database. + /// + /// Uses the provided `headers_range` to get the headers for the range, and `assemble_block` to + /// construct blocks from the following inputs: + /// – Header + /// - Range of transaction numbers + /// – Ommers + /// – Withdrawals + /// – Requests + /// – Senders + fn block_range( + &self, + range: RangeInclusive, + headers_range: HF, + mut assemble_block: F, + ) -> ProviderResult> + where + H: AsRef
, + HF: FnOnce(RangeInclusive) -> ProviderResult>, + F: FnMut( + H, + Range, + Vec
, + Option, + Option, + ) -> ProviderResult, + { + if range.is_empty() { + return Ok(Vec::new()) + } + + let len = range.end().saturating_sub(*range.start()) as usize; + let mut blocks = Vec::with_capacity(len); + + let headers = headers_range(range)?; + let mut ommers_cursor = self.tx.cursor_read::()?; + let mut withdrawals_cursor = self.tx.cursor_read::()?; + let mut requests_cursor = self.tx.cursor_read::()?; + let mut block_body_cursor = self.tx.cursor_read::()?; + + for header in headers { + let header_ref = header.as_ref(); + // If the body indices are not found, this means that the transactions either do + // not exist in the database yet, or they do exit but are + // not indexed. If they exist but are not indexed, we don't + // have enough information to return the block anyways, so + // we skip the block. + if let Some((_, block_body_indices)) = + block_body_cursor.seek_exact(header_ref.number)? + { + let tx_range = block_body_indices.tx_num_range(); + + // If we are past shanghai, then all blocks should have a withdrawal list, + // even if empty + let withdrawals = + if self.chain_spec.is_shanghai_active_at_timestamp(header_ref.timestamp) { + Some( + withdrawals_cursor + .seek_exact(header_ref.number)? + .map(|(_, w)| w.withdrawals) + .unwrap_or_default(), + ) + } else { + None + }; + let requests = + if self.chain_spec.is_prague_active_at_timestamp(header_ref.timestamp) { + Some(requests_cursor.seek_exact(header_ref.number)?.unwrap_or_default().1) + } else { + None + }; + let ommers = + if self.chain_spec.final_paris_total_difficulty(header_ref.number).is_some() { + Vec::new() + } else { + ommers_cursor + .seek_exact(header_ref.number)? + .map(|(_, o)| o.ommers) + .unwrap_or_default() + }; + + if let Ok(b) = assemble_block(header, tx_range, ommers, withdrawals, requests) { + blocks.push(b); + } + } + } + + Ok(blocks) + } + + /// Returns a range of blocks from the database, along with the senders of each + /// transaction in the blocks. 
+ /// + /// Uses the provided `headers_range` to get the headers for the range, and `assemble_block` to + /// construct blocks from the following inputs: + /// – Header + /// - Transactions + /// – Ommers + /// – Withdrawals + /// – Requests + /// – Senders + fn block_with_senders_range( + &self, + range: RangeInclusive, + headers_range: HF, + assemble_block: BF, + ) -> ProviderResult> + where + H: AsRef
, + HF: Fn(RangeInclusive) -> ProviderResult>, + BF: Fn( + H, + Vec, + Vec
, + Option, + Option, + Vec
, + ) -> ProviderResult, + { + let mut tx_cursor = self.tx.cursor_read::()?; + let mut senders_cursor = self.tx.cursor_read::()?; + + self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals, requests| { + let (body, senders) = if tx_range.is_empty() { + (Vec::new(), Vec::new()) + } else { + let body = self + .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? + .into_iter() + .map(Into::into) + .collect::>(); + // fetch senders from the senders table + let known_senders = + senders_cursor + .walk_range(tx_range.clone())? + .collect::, _>>()?; + + let mut senders = Vec::with_capacity(body.len()); + for (tx_num, tx) in tx_range.zip(body.iter()) { + match known_senders.get(&tx_num) { + None => { + // recover the sender from the transaction if not found + let sender = tx + .recover_signer_unchecked() + .ok_or_else(|| ProviderError::SenderRecoveryError)?; + senders.push(sender); + } + Some(sender) => senders.push(*sender), + } + } + + (body, senders) + }; + + assemble_block(header, body, ommers, withdrawals, requests, senders) + }) } } @@ -1116,7 +1336,7 @@ impl ChangeSetReader for DatabaseProvider { impl HeaderSyncGapProvider for DatabaseProvider { fn sync_gap( &self, - mode: HeaderSyncMode, + tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, ) -> ProviderResult { let static_file_provider = self.static_file_provider(); @@ -1151,10 +1371,7 @@ impl HeaderSyncGapProvider for DatabaseProvider { .sealed_header(highest_uninterrupted_block)? 
.ok_or_else(|| ProviderError::HeaderNotFound(highest_uninterrupted_block.into()))?; - let target = match mode { - HeaderSyncMode::Tip(rx) => SyncTarget::Tip(*rx.borrow()), - HeaderSyncMode::Continuous => SyncTarget::TipNum(highest_uninterrupted_block + 1), - }; + let target = SyncTarget::Tip(*tip.borrow()); Ok(HeaderSyncGap { local_head, target }) } @@ -1316,79 +1533,6 @@ impl BlockNumReader for DatabaseProvider { } } -impl DatabaseProvider { - fn process_block_range( - &self, - range: RangeInclusive, - mut assemble_block: F, - ) -> ProviderResult> - where - F: FnMut( - Range, - Header, - Vec
, - Option, - Option, - ) -> ProviderResult, - { - if range.is_empty() { - return Ok(Vec::new()); - } - - let len = range.end().saturating_sub(*range.start()) as usize; - let mut blocks = Vec::with_capacity(len); - - let headers = self.headers_range(range)?; - let mut ommers_cursor = self.tx.cursor_read::()?; - let mut withdrawals_cursor = self.tx.cursor_read::()?; - let mut requests_cursor = self.tx.cursor_read::()?; - let mut block_body_cursor = self.tx.cursor_read::()?; - - for header in headers { - // If the body indices are not found, this means that the transactions either do - // not exist in the database yet, or they do exit but are - // not indexed. If they exist but are not indexed, we don't - // have enough information to return the block anyways, so - // we skip the block. - if let Some((_, block_body_indices)) = block_body_cursor.seek_exact(header.number)? { - let tx_range = block_body_indices.tx_num_range(); - - // If we are past shanghai, then all blocks should have a withdrawal list, - // even if empty - let withdrawals = - if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { - Some( - withdrawals_cursor - .seek_exact(header.number)? - .map(|(_, w)| w.withdrawals) - .unwrap_or_default(), - ) - } else { - None - }; - let requests = if self.chain_spec.is_prague_active_at_timestamp(header.timestamp) { - Some(requests_cursor.seek_exact(header.number)?.unwrap_or_default().1) - } else { - None - }; - let ommers = - if self.chain_spec.final_paris_total_difficulty(header.number).is_some() { - Vec::new() - } else { - ommers_cursor - .seek_exact(header.number)? 
- .map(|(_, o)| o.ommers) - .unwrap_or_default() - }; - if let Ok(b) = assemble_block(tx_range, header, ommers, withdrawals, requests) { - blocks.push(b); - } - } - } - Ok(blocks) - } -} - impl BlockReader for DatabaseProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_database() { @@ -1475,108 +1619,92 @@ impl BlockReader for DatabaseProvider { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; - let Some(header) = self.header_by_number(block_number)? else { return Ok(None) }; - - let ommers = self.ommers(block_number.into())?.unwrap_or_default(); - let withdrawals = self.withdrawals_by_block(block_number.into(), header.timestamp)?; - let requests = self.requests_by_block(block_number.into(), header.timestamp)?; - - // Get the block body - // - // If the body indices are not found, this means that the transactions either do not exist - // in the database yet, or they do exit but are not indexed. If they exist but are not - // indexed, we don't have enough information to return the block anyways, so we return - // `None`. - let Some(body) = self.block_body_indices(block_number)? else { return Ok(None) }; - - let tx_range = body.tx_num_range(); - - let (transactions, senders) = if tx_range.is_empty() { - (vec![], vec![]) - } else { - (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) 
- }; - - let body = transactions - .into_iter() - .map(|tx| match transaction_kind { - TransactionVariant::NoHash => TransactionSigned { - // Caller explicitly asked for no hash, so we don't calculate it - hash: B256::ZERO, - signature: tx.signature, - transaction: tx.transaction, - }, - TransactionVariant::WithHash => tx.with_hash(), - }) - .collect(); + self.block_with_senders( + id, + transaction_kind, + |block_number| self.header_by_number(block_number), + |header, body, senders, ommers, withdrawals, requests| { + Block { header, body, ommers, withdrawals, requests } + // Note: we're using unchecked here because we know the block contains valid txs + // wrt to its height and can ignore the s value check so pre + // EIP-2 txs are allowed + .try_with_senders_unchecked(senders) + .map(Some) + .map_err(|_| ProviderError::SenderRecoveryError) + }, + ) + } - Block { header, body, ommers, withdrawals, requests } - // Note: we're using unchecked here because we know the block contains valid txs wrt to - // its height and can ignore the s value check so pre EIP-2 txs are allowed - .try_with_senders_unchecked(senders) - .map(Some) - .map_err(|_| ProviderError::SenderRecoveryError) + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.block_with_senders( + id, + transaction_kind, + |block_number| self.sealed_header(block_number), + |header, body, senders, ommers, withdrawals, requests| { + SealedBlock { header, body, ommers, withdrawals, requests } + // Note: we're using unchecked here because we know the block contains valid txs + // wrt to its height and can ignore the s value check so pre + // EIP-2 txs are allowed + .try_with_senders_unchecked(senders) + .map(Some) + .map_err(|_| ProviderError::SenderRecoveryError) + }, + ) } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { let mut tx_cursor = self.tx.cursor_read::()?; - self.process_block_range(range, |tx_range, 
header, ommers, withdrawals, requests| { - let body = if tx_range.is_empty() { - Vec::new() - } else { - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect() - }; - Ok(Block { header, body, ommers, withdrawals, requests }) - }) + self.block_range( + range, + |range| self.headers_range(range), + |header, tx_range, ommers, withdrawals, requests| { + let body = if tx_range.is_empty() { + Vec::new() + } else { + self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? + .into_iter() + .map(Into::into) + .collect() + }; + Ok(Block { header, body, ommers, withdrawals, requests }) + }, + ) } fn block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult> { - let mut tx_cursor = self.tx.cursor_read::()?; - let mut senders_cursor = self.tx.cursor_read::()?; - - self.process_block_range(range, |tx_range, header, ommers, withdrawals, requests| { - let (body, senders) = if tx_range.is_empty() { - (Vec::new(), Vec::new()) - } else { - let body = self - .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect::>(); - // fetch senders from the senders table - let known_senders = - senders_cursor - .walk_range(tx_range.clone())? 
- .collect::, _>>()?; - - let mut senders = Vec::with_capacity(body.len()); - for (tx_num, tx) in tx_range.zip(body.iter()) { - match known_senders.get(&tx_num) { - None => { - // recover the sender from the transaction if not found - let sender = tx - .recover_signer_unchecked() - .ok_or_else(|| ProviderError::SenderRecoveryError)?; - senders.push(sender); - } - Some(sender) => senders.push(*sender), - } - } - - (body, senders) - }; + self.block_with_senders_range( + range, + |range| self.headers_range(range), + |header, body, ommers, withdrawals, requests, senders| { + Block { header, body, ommers, withdrawals, requests } + .try_with_senders_unchecked(senders) + .map_err(|_| ProviderError::SenderRecoveryError) + }, + ) + } - Block { header, body, ommers, withdrawals, requests } - .try_with_senders_unchecked(senders) - .map_err(|_| ProviderError::SenderRecoveryError) - }) + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.block_with_senders_range( + range, + |range| self.sealed_headers_range(range), + |header, body, ommers, withdrawals, requests, senders| { + SealedBlockWithSenders::new( + SealedBlock { header, body, ommers, withdrawals, requests }, + senders, + ) + .ok_or(ProviderError::SenderRecoveryError) + }, + ) } } @@ -1919,7 +2047,7 @@ impl EvmEnvProvider for DatabaseProvider { cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, header: &Header, - _evm_config: EvmConfig, + evm_config: EvmConfig, ) -> ProviderResult<()> where EvmConfig: ConfigureEvmEnv, @@ -1927,7 +2055,7 @@ impl EvmEnvProvider for DatabaseProvider { let total_difficulty = self .header_td_by_number(header.number)? 
.ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - EvmConfig::fill_cfg_and_block_env( + evm_config.fill_cfg_and_block_env( cfg, block_env, &self.chain_spec, @@ -1937,41 +2065,6 @@ impl EvmEnvProvider for DatabaseProvider { Ok(()) } - fn fill_block_env_at( - &self, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - ) -> ProviderResult<()> { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - - self.fill_block_env_with_header(block_env, &header) - } - - fn fill_block_env_with_header( - &self, - block_env: &mut BlockEnv, - header: &Header, - ) -> ProviderResult<()> { - let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - let spec_id = revm_spec( - &self.chain_spec, - Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - // Not required - hash: Default::default(), - }, - ); - let after_merge = spec_id >= SpecId::MERGE; - fill_block_env(block_env, &self.chain_spec, header, after_merge); - Ok(()) - } - fn fill_cfg_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -1990,7 +2083,7 @@ impl EvmEnvProvider for DatabaseProvider { &self, cfg: &mut CfgEnvWithHandlerCfg, header: &Header, - _evm_config: EvmConfig, + evm_config: EvmConfig, ) -> ProviderResult<()> where EvmConfig: ConfigureEvmEnv, @@ -1998,7 +2091,7 @@ impl EvmEnvProvider for DatabaseProvider { let total_difficulty = self .header_td_by_number(header.number)? 
.ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - EvmConfig::fill_cfg_env(cfg, &self.chain_spec, header, total_difficulty); + evm_config.fill_cfg_env(cfg, &self.chain_spec, header, total_difficulty); Ok(()) } } @@ -2313,7 +2406,7 @@ impl HashingWriter for DatabaseProvider { block_hash: end_block_hash, }))); } - trie_updates.flush(&self.tx)?; + trie_updates.write_to_database(&self.tx)?; } durations_recorder.record_relative(metrics::Action::InsertMerkleTree); @@ -2509,7 +2602,7 @@ impl BlockExecutionWriter for DatabaseProvider { block_hash: parent_hash, }))); } - trie_updates.flush(&self.tx)?; + trie_updates.write_to_database(&self.tx)?; } // get blocks @@ -2538,7 +2631,6 @@ impl BlockWriter for DatabaseProvider { fn insert_block( &self, block: SealedBlockWithSenders, - prune_modes: Option<&PruneModes>, ) -> ProviderResult { let block_number = block.number; @@ -2595,8 +2687,10 @@ impl BlockWriter for DatabaseProvider { for (transaction, sender) in block.block.body.into_iter().zip(block.senders.iter()) { let hash = transaction.hash(); - if prune_modes - .and_then(|modes| modes.sender_recovery) + if self + .prune_modes + .sender_recovery + .as_ref() .filter(|prune_mode| prune_mode.is_full()) .is_none() { @@ -2620,8 +2714,9 @@ impl BlockWriter for DatabaseProvider { } transactions_elapsed += elapsed; - if prune_modes - .and_then(|modes| modes.transaction_lookup) + if self + .prune_modes + .transaction_lookup .filter(|prune_mode| prune_mode.is_full()) .is_none() { @@ -2682,7 +2777,6 @@ impl BlockWriter for DatabaseProvider { execution_outcome: ExecutionOutcome, hashed_state: HashedPostState, trie_updates: TrieUpdates, - prune_modes: Option<&PruneModes>, ) -> ProviderResult<()> { if blocks.is_empty() { debug!(target: "providers::db", "Attempted to append empty block range"); @@ -2698,7 +2792,7 @@ impl BlockWriter for DatabaseProvider { // Insert the blocks for block in blocks { - self.insert_block(block, prune_modes)?; + self.insert_block(block)?; 
durations_recorder.record_relative(metrics::Action::InsertBlock); } @@ -2710,7 +2804,7 @@ impl BlockWriter for DatabaseProvider { // insert hashes and intermediate merkle nodes { HashedStateChanges(hashed_state).write_to_db(&self.tx)?; - trie_updates.flush(&self.tx)?; + trie_updates.write_to_database(&self.tx)?; } durations_recorder.record_relative(metrics::Action::InsertHashes); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 558a75de9730..8ede8dcf7484 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -12,6 +12,7 @@ use reth_blockchain_tree_api::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; +use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db_api::{ database::Database, models::{AccountBeforeTx, StoredBlockBodyIndices}, @@ -19,9 +20,9 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_primitives::{ Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumber, - BlockNumberOrTag, BlockWithSenders, ChainInfo, ChainSpec, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, + BlockNumberOrTag, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, + Withdrawal, Withdrawals, B256, U256, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -327,6 +328,14 @@ where self.database.block_with_senders(id, transaction_kind) } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.database.sealed_block_with_senders(id, transaction_kind) + } + fn block_range(&self, range: 
RangeInclusive) -> ProviderResult> { self.database.block_range(range) } @@ -337,6 +346,13 @@ where ) -> ProviderResult> { self.database.block_with_senders_range(range) } + + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.database.sealed_block_with_senders_range(range) + } } impl TransactionsProvider for BlockchainProvider @@ -530,22 +546,6 @@ where self.database.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } - fn fill_block_env_at( - &self, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - ) -> ProviderResult<()> { - self.database.provider()?.fill_block_env_at(block_env, at) - } - - fn fill_block_env_with_header( - &self, - block_env: &mut BlockEnv, - header: &Header, - ) -> ProviderResult<()> { - self.database.provider()?.fill_block_env_with_header(block_env, header) - } - fn fill_cfg_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 88793b279025..94ae361aff6c 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -10,11 +10,12 @@ use reth_db_api::{ transaction::DbTx, }; use reth_primitives::{ - constants::EPOCH_SLOTS, proofs::AccountProof, Account, Address, BlockNumber, Bytecode, - StaticFileSegment, StorageKey, StorageValue, B256, + constants::EPOCH_SLOTS, Account, Address, BlockNumber, Bytecode, StaticFileSegment, StorageKey, + StorageValue, B256, }; +use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::ProviderResult; -use reth_trie::{updates::TrieUpdates, HashedPostState}; +use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; use revm::db::BundleState; use std::fmt::Debug; @@ -271,6 +272,22 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { } } +impl<'b, TX: DbTx> StateProofProvider for 
HistoricalStateProviderRef<'b, TX> { + /// Get account and storage proofs. + fn proof( + &self, + state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult { + let mut revert_state = self.revert_state()?; + revert_state.extend(HashedPostState::from_bundle_state(&state.state)); + revert_state + .account_proof(self.tx, address, slots) + .map_err(|err| ProviderError::Database(err.into())) + } +} + impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { /// Get storage. fn storage( @@ -306,11 +323,6 @@ impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { self.tx.get::(code_hash).map_err(Into::into) } - - /// Get account and storage proofs. - fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { - Err(ProviderError::StateRootNotAvailableForHistoricalBlock) - } } /// State provider for a given block number. diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 4c5b90af8e9d..2ffd5fd3334d 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -8,11 +8,11 @@ use reth_db_api::{ transaction::DbTx, }; use reth_primitives::{ - proofs::AccountProof, Account, Address, BlockNumber, Bytecode, StaticFileSegment, StorageKey, - StorageValue, B256, + Account, Address, BlockNumber, Bytecode, StaticFileSegment, StorageKey, StorageValue, B256, }; +use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_trie::{proof::Proof, updates::TrieUpdates, HashedPostState}; +use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; use revm::db::BundleState; /// State provider over latest state that takes tx reference. 
@@ -91,6 +91,19 @@ impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { } } +impl<'b, TX: DbTx> StateProofProvider for LatestStateProviderRef<'b, TX> { + fn proof( + &self, + bundle_state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult { + Ok(HashedPostState::from_bundle_state(&bundle_state.state) + .account_proof(self.tx, address, slots) + .map_err(Into::::into)?) + } +} + impl<'b, TX: DbTx> StateProvider for LatestStateProviderRef<'b, TX> { /// Get storage. fn storage( @@ -111,12 +124,6 @@ impl<'b, TX: DbTx> StateProvider for LatestStateProviderRef<'b, TX> { fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { self.tx.get::(code_hash).map_err(Into::into) } - - fn proof(&self, address: Address, slots: &[B256]) -> ProviderResult { - Ok(Proof::new(self.tx) - .account_proof(address, slots) - .map_err(Into::::into)?) - } } /// State provider for the latest state. diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 1d5a9597832c..344a21101f43 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -30,10 +30,6 @@ macro_rules! delegate_provider_impls { ($target:ty $(where [$($generics:tt)*])?) => { $crate::providers::state::macros::delegate_impls_to_as_ref!( for $target => - StateRootProvider $(where [$($generics)*])? { - fn state_root(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult; - fn state_root_with_updates(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; - } AccountReader $(where [$($generics)*])? { fn basic_account(&self, address: reth_primitives::Address) -> reth_storage_errors::provider::ProviderResult>; } @@ -41,11 +37,17 @@ macro_rules! 
delegate_provider_impls { fn block_hash(&self, number: u64) -> reth_storage_errors::provider::ProviderResult>; fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_storage_errors::provider::ProviderResult>; } - StateProvider $(where [$($generics)*])?{ + StateProvider $(where [$($generics)*])? { fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_storage_errors::provider::ProviderResult>; - fn proof(&self, address: reth_primitives::Address, keys: &[reth_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_storage_errors::provider::ProviderResult>; } + StateRootProvider $(where [$($generics)*])? { + fn state_root(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult; + fn state_root_with_updates(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; + } + StateProofProvider $(where [$($generics)*])? 
{ + fn proof(&self, state: &revm::db::BundleState, address: reth_primitives::Address, slots: &[reth_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; + } ); } } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 9409abb0d8e3..10d725d2a43f 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,10 +6,11 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; +use reth_chainspec::ChainInfo; use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; use reth_db_api::models::CompactU256; use reth_primitives::{ - Address, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, Header, Receipt, SealedHeader, + Address, BlockHash, BlockHashOrNumber, BlockNumber, Header, Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, B256, U256, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 5a0b7588a763..0844044d427b 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -9,6 +9,7 @@ use crate::{ }; use dashmap::{mapref::entry::Entry as DashMapEntry, DashMap}; use parking_lot::RwLock; +use reth_chainspec::ChainInfo; use reth_db::{ lockfile::StorageLock, static_file::{iter_static_files, HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}, @@ -24,8 +25,8 @@ use reth_nippy_jar::NippyJar; use reth_primitives::{ keccak256, static_file::{find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive}, - Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, Header, - 
Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, + Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Header, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; @@ -1456,6 +1457,15 @@ impl BlockReader for StaticFileProvider { Err(ProviderError::UnsupportedProvider) } + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult> { + // Required data not present in static_files + Err(ProviderError::UnsupportedProvider) + } + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) @@ -1467,6 +1477,13 @@ impl BlockReader for StaticFileProvider { ) -> ProviderResult> { Err(ProviderError::UnsupportedProvider) } + + fn sealed_block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Err(ProviderError::UnsupportedProvider) + } } impl WithdrawalsProvider for StaticFileProvider { diff --git a/crates/storage/provider/src/providers/static_file/metrics.rs b/crates/storage/provider/src/providers/static_file/metrics.rs index f1a4204a7b58..72589ca69856 100644 --- a/crates/storage/provider/src/providers/static_file/metrics.rs +++ b/crates/storage/provider/src/providers/static_file/metrics.rs @@ -80,6 +80,28 @@ impl StaticFileProviderMetrics { .record(duration.as_secs_f64()); } } + + pub(crate) fn record_segment_operations( + &self, + segment: StaticFileSegment, + operation: StaticFileProviderOperation, + count: u64, + duration: Option, + ) { + self.segment_operations + .get(&(segment, operation)) + .expect("segment operation metrics should exist") + .calls_total + .increment(count); + + if let Some(duration) = duration { + self.segment_operations + .get(&(segment, 
operation)) + .expect("segment operation metrics should exist") + .write_duration_seconds + .record(duration.as_secs_f64() / count as f64); + } + } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter)] diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 5c2057b3b57d..c5abdbe00c31 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -59,15 +59,9 @@ mod tests { use super::*; use crate::{test_utils::create_test_provider_factory, HeaderProvider}; use rand::seq::SliceRandom; - use reth_db::{ - static_file::create_static_file_T1_T2_T3, CanonicalHeaders, HeaderNumbers, - HeaderTerminalDifficulties, Headers, RawTable, - }; - use reth_db_api::{ - cursor::DbCursorRO, - transaction::{DbTx, DbTxMut}, - }; - use reth_primitives::{static_file::find_fixed_range, BlockNumber, B256, U256}; + use reth_db::{CanonicalHeaders, HeaderNumbers, HeaderTerminalDifficulties, Headers}; + use reth_db_api::transaction::DbTxMut; + use reth_primitives::{static_file::find_fixed_range, B256, U256}; use reth_testing_utils::generators::{self, random_header_range}; #[test] @@ -75,12 +69,6 @@ mod tests { // Ranges let row_count = 100u64; let range = 0..=(row_count - 1); - let segment_header = SegmentHeader::new( - range.clone().into(), - Some(range.clone().into()), - Some(range.clone().into()), - StaticFileSegment::Headers, - ); // Data sources let factory = create_test_provider_factory(); @@ -112,48 +100,22 @@ mod tests { // Create StaticFile { - let with_compression = true; - let with_filter = true; - - let mut nippy_jar = NippyJar::new(3, static_file.as_path(), segment_header); - - if with_compression { - nippy_jar = nippy_jar.with_zstd(false, 0); + let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let mut writer = manager.latest_writer(StaticFileSegment::Headers).unwrap(); + let mut td = 
U256::ZERO; + + for header in headers.clone() { + td += header.header().difficulty; + let hash = header.hash(); + writer.append_header(header.unseal(), td, hash).unwrap(); } - - if with_filter { - nippy_jar = nippy_jar.with_cuckoo_filter(row_count as usize + 10).with_fmph(); - } - - let provider = factory.provider().unwrap(); - let tx = provider.tx_ref(); - - // Hacky type inference. TODO fix - let mut none_vec = Some(vec![vec![vec![0u8]].into_iter()]); - let _ = none_vec.take(); - - // Generate list of hashes for filters & PHF - let mut cursor = tx.cursor_read::>().unwrap(); - let hashes = cursor - .walk(None) - .unwrap() - .map(|row| row.map(|(_key, value)| value.into_value()).map_err(|e| e.into())); - - create_static_file_T1_T2_T3::< - Headers, - HeaderTerminalDifficulties, - CanonicalHeaders, - BlockNumber, - SegmentHeader, - >(tx, range, None, none_vec, Some(hashes), row_count as usize, nippy_jar) - .unwrap(); + writer.commit().unwrap(); } // Use providers to query Header data and compare if it matches { let db_provider = factory.provider().unwrap(); - let manager = - StaticFileProvider::read_write(static_files_path.path()).unwrap().with_filters(); + let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); let jar_provider = manager .get_segment_provider_from_block(StaticFileSegment::Headers, 0, Some(&static_file)) .unwrap(); @@ -169,12 +131,12 @@ mod tests { // Compare Header assert_eq!(header, db_provider.header(&header_hash).unwrap().unwrap()); - assert_eq!(header, jar_provider.header(&header_hash).unwrap().unwrap()); + assert_eq!(header, jar_provider.header_by_number(header.number).unwrap().unwrap()); // Compare HeaderTerminalDifficulties assert_eq!( db_provider.header_td(&header_hash).unwrap().unwrap(), - jar_provider.header_td(&header_hash).unwrap().unwrap() + jar_provider.header_td_by_number(header.number).unwrap().unwrap() ); } } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs 
b/crates/storage/provider/src/providers/static_file/writer.rs index 99f9d21b9f79..397606083528 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -547,6 +547,44 @@ impl StaticFileProviderRW { Ok(result) } + /// Appends multiple receipts to the static file. + /// + /// Returns the current [`TxNumber`] as seen in the static file, if any. + pub fn append_receipts(&mut self, receipts: I) -> ProviderResult> + where + I: IntoIterator>, + { + let mut receipts_iter = receipts.into_iter().peekable(); + // If receipts are empty, we can simply return None + if receipts_iter.peek().is_none() { + return Ok(None); + } + + let start = Instant::now(); + self.ensure_no_queued_prune()?; + + // At this point receipts contains at least one receipt, so this would be overwritten. + let mut tx_number = 0; + let mut count: u64 = 0; + + for receipt_result in receipts_iter { + let (tx_num, receipt) = receipt_result?; + tx_number = self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt)?; + count += 1; + } + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operations( + StaticFileSegment::Receipts, + StaticFileProviderOperation::Append, + count, + Some(start.elapsed()), + ); + } + + Ok(Some(tx_number)) + } + /// Adds an instruction to prune `to_delete`transactions during commit. /// /// Note: `last_block` refers to the block the unwinds ends at. 
diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index be4420fc5199..2a0f900a5e16 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -5,13 +5,11 @@ use alloy_rlp::Decodable; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_primitives::{ - alloy_primitives, b256, - hex_literal::hex, - proofs::{state_root_unhashed, storage_root_unhashed}, - revm::compat::into_reth_acc, - Address, BlockNumber, Bytes, Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, - TxType, Withdrawal, Withdrawals, B256, U256, + alloy_primitives, b256, hex_literal::hex, Account, Address, BlockNumber, Bytes, Header, + Receipt, Requests, SealedBlock, SealedBlockWithSenders, TxType, Withdrawal, Withdrawals, B256, + U256, }; +use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{ db::BundleState, primitives::{AccountInfo, HashMap}, @@ -121,7 +119,7 @@ fn bundle_state_root(execution_outcome: &ExecutionOutcome) -> B256 { ( address, ( - into_reth_acc(info.clone()), + Into::::into(info.clone()), storage_root_unhashed( account .storage diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index cab4417aea9e..6a2446d68f9d 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -6,17 +6,18 @@ use crate::{ StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use parking_lot::Mutex; +use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - keccak256, proofs::AccountProof, Account, Address, Block, BlockHash, BlockHashOrNumber, - BlockId, BlockNumber, BlockWithSenders, Bytecode, Bytes, ChainInfo, ChainSpec, Header, Receipt, - SealedBlock, 
SealedBlockWithSenders, SealedHeader, StorageKey, StorageValue, TransactionMeta, - TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, - U256, + keccak256, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, + BlockWithSenders, Bytecode, Bytes, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, + TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; +use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_trie::updates::TrieUpdates; +use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::{ db::BundleState, primitives::{BlockEnv, CfgEnvWithHandlerCfg}, @@ -46,7 +47,7 @@ impl Default for MockEthProvider { blocks: Default::default(), headers: Default::default(), accounts: Default::default(), - chain_spec: Arc::new(reth_primitives::ChainSpecBuilder::mainnet().build()), + chain_spec: Arc::new(reth_chainspec::ChainSpecBuilder::mainnet().build()), } } } @@ -474,6 +475,14 @@ impl BlockReader for MockEthProvider { Ok(None) } + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult> { + Ok(None) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { let lock = self.blocks.lock(); @@ -490,6 +499,13 @@ impl BlockReader for MockEthProvider { ) -> ProviderResult> { Ok(vec![]) } + + fn sealed_block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(vec![]) + } } impl BlockReaderIdExt for MockEthProvider { @@ -538,6 +554,17 @@ impl StateRootProvider for MockEthProvider { } } +impl StateProofProvider for MockEthProvider { + fn proof( + &self, + _state: &BundleState, + address: Address, + _slots: &[B256], + ) -> ProviderResult { + Ok(AccountProof::new(address)) + } +} + impl StateProvider for MockEthProvider { fn 
storage( &self, @@ -559,10 +586,6 @@ impl StateProvider for MockEthProvider { } })) } - - fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult { - Ok(AccountProof::new(address)) - } } impl EvmEnvProvider for MockEthProvider { @@ -592,22 +615,6 @@ impl EvmEnvProvider for MockEthProvider { Ok(()) } - fn fill_block_env_at( - &self, - _block_env: &mut BlockEnv, - _at: BlockHashOrNumber, - ) -> ProviderResult<()> { - Ok(()) - } - - fn fill_block_env_with_header( - &self, - _block_env: &mut BlockEnv, - _header: &Header, - ) -> ProviderResult<()> { - Ok(()) - } - fn fill_cfg_env_at( &self, _cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index 6f5ecd526783..4d40ad54e990 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,9 +1,9 @@ use crate::{providers::StaticFileProvider, ProviderFactory}; +use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, DatabaseEnv, }; -use reth_primitives::{ChainSpec, MAINNET}; use std::sync::Arc; pub mod blocks; diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index ce5e18de45a8..445d5666ac61 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -1,31 +1,37 @@ -use crate::{ - traits::{BlockSource, ReceiptProvider}, - AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PruneCheckpointReader, - ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProvider, StateProviderBox, - StateProviderFactory, StateRootProvider, TransactionVariant, TransactionsProvider, - WithdrawalsProvider, +use std::{ + ops::{RangeBounds, RangeInclusive}, + sync::Arc, }; + +use 
reth_chainspec::{ChainInfo, ChainSpec, MAINNET}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - proofs::AccountProof, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, - BlockNumber, BlockWithSenders, Bytecode, ChainInfo, ChainSpec, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StorageKey, StorageValue, TransactionMeta, - TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, - MAINNET, U256, + Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, BlockWithSenders, + Bytecode, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageKey, + StorageValue, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, + Withdrawal, Withdrawals, B256, U256, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::ProviderResult; -use reth_trie::updates::TrieUpdates; +use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::{ db::BundleState, primitives::{BlockEnv, CfgEnvWithHandlerCfg}, }; -use std::{ - ops::{RangeBounds, RangeInclusive}, - sync::Arc, +use tokio::sync::broadcast; + +use crate::{ + providers::StaticFileProvider, + traits::{BlockSource, ReceiptProvider}, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + EvmEnvProvider, HeaderProvider, PruneCheckpointReader, ReceiptProviderIdExt, RequestsProvider, + StageCheckpointReader, StateProvider, StateProviderBox, StateProviderFactory, + StateRootProvider, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; /// Supports various api interfaces for testing purposes. 
@@ -113,6 +119,14 @@ impl BlockReader for NoopProvider { Ok(None) } + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult> { + Ok(None) + } + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { Ok(vec![]) } @@ -123,6 +137,13 @@ impl BlockReader for NoopProvider { ) -> ProviderResult> { Ok(vec![]) } + + fn sealed_block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(vec![]) + } } impl BlockReaderIdExt for NoopProvider { @@ -306,6 +327,17 @@ impl StateRootProvider for NoopProvider { } } +impl StateProofProvider for NoopProvider { + fn proof( + &self, + _state: &BundleState, + address: Address, + _slots: &[B256], + ) -> ProviderResult { + Ok(AccountProof::new(address)) + } +} + impl StateProvider for NoopProvider { fn storage( &self, @@ -318,10 +350,6 @@ impl StateProvider for NoopProvider { fn bytecode_by_hash(&self, _code_hash: B256) -> ProviderResult> { Ok(None) } - - fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult { - Ok(AccountProof::new(address)) - } } impl EvmEnvProvider for NoopProvider { @@ -351,22 +379,6 @@ impl EvmEnvProvider for NoopProvider { Ok(()) } - fn fill_block_env_at( - &self, - _block_env: &mut BlockEnv, - _at: BlockHashOrNumber, - ) -> ProviderResult<()> { - Ok(()) - } - - fn fill_block_env_with_header( - &self, - _block_env: &mut BlockEnv, - _header: &Header, - ) -> ProviderResult<()> { - Ok(()) - } - fn fill_cfg_env_at( &self, _cfg: &mut CfgEnvWithHandlerCfg, @@ -466,3 +478,15 @@ impl PruneCheckpointReader for NoopProvider { Ok(None) } } + +impl StaticFileProviderFactory for NoopProvider { + fn static_file_provider(&self) -> StaticFileProvider { + StaticFileProvider::default() + } +} + +impl CanonStateSubscriptions for NoopProvider { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + broadcast::channel(1).1 + } +} diff --git a/crates/storage/provider/src/traits/block.rs 
b/crates/storage/provider/src/traits/block.rs index 56255969e97b..3d0cf3c0cb4d 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,7 +1,6 @@ -use crate::{Chain, ExecutionOutcome}; use reth_db_api::models::StoredBlockBodyIndices; +use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{BlockNumber, SealedBlockWithSenders}; -use reth_prune_types::PruneModes; use reth_storage_api::BlockReader; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostState}; @@ -41,11 +40,8 @@ pub trait BlockWriter: Send + Sync { /// /// Return [StoredBlockBodyIndices] that contains indices of the first and last transactions and /// transition in the block. - fn insert_block( - &self, - block: SealedBlockWithSenders, - prune_modes: Option<&PruneModes>, - ) -> ProviderResult; + fn insert_block(&self, block: SealedBlockWithSenders) + -> ProviderResult; /// Appends a batch of sealed blocks to the blockchain, including sender information, and /// updates the post-state. @@ -57,7 +53,6 @@ pub trait BlockWriter: Send + Sync { /// /// - `blocks`: Vector of `SealedBlockWithSenders` instances to append. /// - `state`: Post-state information to update after appending. - /// - `prune_modes`: Optional pruning configuration. 
/// /// # Returns /// @@ -68,6 +63,5 @@ pub trait BlockWriter: Send + Sync { execution_outcome: ExecutionOutcome, hashed_state: HashedPostState, trie_updates: TrieUpdates, - prune_modes: Option<&PruneModes>, ) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 6b35d6b25135..c53150560d3a 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -2,8 +2,8 @@ use crate::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, - StaticFileProviderFactory, + DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, StageCheckpointReader, + StateProviderFactory, StaticFileProviderFactory, TransactionsProvider, }; use reth_db_api::database::Database; @@ -41,3 +41,31 @@ impl FullProvider for T where + 'static { } + +/// Helper trait to unify all provider traits required to support `eth` RPC server behaviour, for +/// simplicity. +pub trait FullRpcProvider: + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + BlockReaderIdExt + + HeaderProvider + + TransactionsProvider + + Clone + + Unpin + + 'static +{ +} + +impl FullRpcProvider for T where + T: StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + BlockReaderIdExt + + HeaderProvider + + TransactionsProvider + + Clone + + Unpin + + 'static +{ +} diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs index 54556101aec5..faa02b39e96b 100644 --- a/crates/storage/provider/src/traits/header_sync_gap.rs +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -3,17 +3,6 @@ use reth_primitives::{BlockHashOrNumber, BlockNumber, SealedHeader, B256}; use reth_storage_errors::provider::ProviderResult; use tokio::sync::watch; -/// The header sync mode. 
-#[derive(Clone, Debug)] -pub enum HeaderSyncMode { - /// A sync mode in which the stage continuously requests the downloader for - /// next blocks. - Continuous, - /// A sync mode in which the stage polls the receiver for the next tip - /// to download from. - Tip(watch::Receiver), -} - /// Represents a gap to sync: from `local_head` to `target` #[derive(Clone, Debug)] pub struct HeaderSyncGap { @@ -38,13 +27,13 @@ impl HeaderSyncGap { /// Client trait for determining the current headers sync gap. #[auto_impl::auto_impl(&, Arc)] pub trait HeaderSyncGapProvider: Send + Sync { - /// Find a current sync gap for the headers depending on the [HeaderSyncMode] and the last + /// Find a current sync gap for the headers depending on the last /// uninterrupted block number. Last uninterrupted block represents the block number before /// which there are no gaps. It's up to the caller to ensure that last uninterrupted block is /// determined correctly. fn sync_gap( &self, - mode: HeaderSyncMode, + tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, ) -> ProviderResult; } diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 283ba5a48eed..16071edfff12 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -13,7 +13,7 @@ mod chain_info; pub use chain_info::CanonChainTracker; mod header_sync_gap; -pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode}; +pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; mod state; pub use state::StateWriter; @@ -43,7 +43,7 @@ mod stats; pub use stats::StatsReader; mod full; -pub use full::FullProvider; +pub use full::{FullProvider, FullRpcProvider}; mod tree_viewer; pub use tree_viewer::TreeViewer; diff --git a/crates/storage/provider/src/traits/spec.rs b/crates/storage/provider/src/traits/spec.rs index 47d95fbd586b..798bfeae16fd 100644 --- a/crates/storage/provider/src/traits/spec.rs +++ 
b/crates/storage/provider/src/traits/spec.rs @@ -1,7 +1,8 @@ -use reth_primitives::ChainSpec; +use reth_chainspec::ChainSpec; use std::sync::Arc; /// A trait for reading the current chainspec. +#[auto_impl::auto_impl(&, Arc)] pub trait ChainSpecProvider: Send + Sync { /// Get an [`Arc`] to the chainspec. fn chain_spec(&self) -> Arc; diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 2c6f5bc1460a..36a0c2a138f0 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-execution-types.workspace = true reth-db-api.workspace = true reth-primitives.workspace = true diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 539930f5a5c6..3dc22de8ae4f 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -118,19 +118,35 @@ pub trait BlockReader: transaction_kind: TransactionVariant, ) -> ProviderResult>; + /// Returns the sealed block with senders with matching number or hash from database. + /// + /// Returns the block's transactions in the requested variant. + /// + /// Returns `None` if block is not found. + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>; + /// Returns all blocks in the given inclusive range. /// /// Note: returns only available blocks fn block_range(&self, range: RangeInclusive) -> ProviderResult>; - /// retrieves a range of blocks from the database, along with the senders of each + /// Returns a range of blocks from the database, along with the senders of each /// transaction in the blocks. 
- /// - /// The `transaction_kind` parameter determines whether to return its hash fn block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult>; + + /// Returns a range of sealed blocks from the database, along with the senders of each + /// transaction in the blocks. + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>; } /// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index e648aa609eda..ca92b8a2f184 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -1,5 +1,6 @@ use crate::BlockHashReader; -use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, ChainInfo, B256}; +use reth_chainspec::ChainInfo; +use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, B256}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// Client trait for getting important block numbers (such as the latest block number), converting diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index c5663bc2a360..440c27d37dcc 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -51,3 +51,5 @@ pub use trie::*; mod withdrawals; pub use withdrawals::*; + +pub mod noop; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs new file mode 100644 index 000000000000..a55371f3c3d9 --- /dev/null +++ b/crates/storage/storage-api/src/noop.rs @@ -0,0 +1,44 @@ +//! Various noop implementations for traits. + +use crate::{BlockHashReader, BlockNumReader}; +use reth_chainspec::ChainInfo; +use reth_primitives::{BlockNumber, B256}; +use reth_storage_errors::provider::ProviderResult; + +/// Supports various api interfaces for testing purposes. 
+#[derive(Debug, Clone, Default, Copy)] +#[non_exhaustive] +pub struct NoopBlockReader; + +/// Noop implementation for testing purposes +impl BlockHashReader for NoopBlockReader { + fn block_hash(&self, _number: u64) -> ProviderResult> { + Ok(None) + } + + fn canonical_hashes_range( + &self, + _start: BlockNumber, + _end: BlockNumber, + ) -> ProviderResult> { + Ok(vec![]) + } +} + +impl BlockNumReader for NoopBlockReader { + fn chain_info(&self) -> ProviderResult { + Ok(ChainInfo::default()) + } + + fn best_block_number(&self) -> ProviderResult { + Ok(0) + } + + fn last_block_number(&self) -> ProviderResult { + Ok(0) + } + + fn block_number(&self, _hash: B256) -> ProviderResult> { + Ok(None) + } +} diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index 246c6a6cf008..79941e1090a5 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -38,6 +38,7 @@ pub trait ReceiptProvider: Send + Sync { /// so this trait can only be implemented for types that implement `BlockIdReader`. The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the receipts should be done using the type's `ReceiptProvider` methods. 
+#[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { /// Get receipt by block id fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index db3230ee46e1..44c4a8b7f76f 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -1,9 +1,9 @@ -use super::{AccountReader, BlockHashReader, BlockIdReader, StateRootProvider}; +use super::{AccountReader, BlockHashReader, BlockIdReader, StateProofProvider, StateRootProvider}; use auto_impl::auto_impl; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, - Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, + Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, + StorageValue, B256, KECCAK_EMPTY, U256, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -12,7 +12,9 @@ pub type StateProviderBox = Box; /// An abstraction for a type that provides state data. #[auto_impl(&, Arc, Box)] -pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + Send + Sync { +pub trait StateProvider: + BlockHashReader + AccountReader + StateRootProvider + StateProofProvider + Send + Sync +{ /// Get storage of given account. fn storage( &self, @@ -23,9 +25,6 @@ pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + S /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult>; - /// Get account and storage proofs. - fn proof(&self, address: Address, keys: &[B256]) -> ProviderResult; - /// Get account code by its address. 
/// /// Returns `None` if the account doesn't exist or account is not a contract diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 083f565492e4..0ab25d18ad8d 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,6 +1,6 @@ -use reth_primitives::B256; +use reth_primitives::{Address, B256}; use reth_storage_errors::provider::ProviderResult; -use reth_trie::updates::TrieUpdates; +use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::db::BundleState; /// A type that can compute the state root of a given post state. @@ -22,3 +22,16 @@ pub trait StateRootProvider: Send + Sync { bundle_state: &BundleState, ) -> ProviderResult<(B256, TrieUpdates)>; } + +/// A type that can generate state proof on top of a given post state. +#[auto_impl::auto_impl(&, Box, Arc)] +pub trait StateProofProvider: Send + Sync { + /// Get account and storage proofs of target keys in the `BundleState` + /// on top of the current state. + fn proof( + &self, + state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult; +} diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index 63eb870fc15e..82c80c0932b8 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -23,6 +23,7 @@ reth-metrics.workspace = true metrics.workspace = true # misc +auto_impl.workspace = true tracing.workspace = true thiserror.workspace = true dyn-clone.workspace = true diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index de0043fb8366..50be66c96ce1 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -84,6 +84,7 @@ pub mod pool; /// ``` /// /// The [`TaskSpawner`] trait is [`DynClone`] so `Box` are also `Clone`. +#[auto_impl::auto_impl(&, Arc)] pub trait TaskSpawner: Send + Sync + Unpin + std::fmt::Debug + DynClone { /// Spawns the task onto the runtime. /// See also [`Handle::spawn`]. 
@@ -464,7 +465,7 @@ impl TaskExecutor { error!("{task_error}"); let _ = panicked_tasks_tx.send(task_error); }) - .map(|_| ()) + .map(drop) .in_current_span(); self.handle.spawn(task) @@ -513,7 +514,7 @@ impl TaskExecutor { error!("{task_error}"); let _ = panicked_tasks_tx.send(task_error); }) - .map(|_| ()) + .map(drop) .in_current_span(); self.handle.spawn(task) @@ -580,6 +581,7 @@ impl TaskSpawner for TaskExecutor { } /// `TaskSpawner` with extended behaviour +#[auto_impl::auto_impl(&, Arc)] pub trait TaskSpawnerExt: Send + Sync + Unpin + std::fmt::Debug + DynClone { /// This spawns a critical task onto the runtime. /// diff --git a/crates/tasks/src/pool.rs b/crates/tasks/src/pool.rs index dbb4e19de98d..10fedccedd1f 100644 --- a/crates/tasks/src/pool.rs +++ b/crates/tasks/src/pool.rs @@ -43,7 +43,8 @@ impl BlockingTaskGuard { /// /// This is a dedicated threadpool for blocking tasks which are CPU bound. /// RPC calls that perform blocking IO (disk lookups) are not executed on this pool but on the tokio -/// runtime's blocking pool, which performs poorly with CPU bound tasks. Once the tokio blocking +/// runtime's blocking pool, which performs poorly with CPU bound tasks (see +/// ). Once the tokio blocking /// pool is saturated it is converted into a queue, blocking tasks could then interfere with the /// queue and block other RPC calls. 
/// diff --git a/crates/tasks/src/shutdown.rs b/crates/tasks/src/shutdown.rs index 918a0cf36a57..bd9a50dc9aa3 100644 --- a/crates/tasks/src/shutdown.rs +++ b/crates/tasks/src/shutdown.rs @@ -20,7 +20,7 @@ pub struct GracefulShutdown { } impl GracefulShutdown { - pub(crate) fn new(shutdown: Shutdown, guard: GracefulShutdownGuard) -> Self { + pub(crate) const fn new(shutdown: Shutdown, guard: GracefulShutdownGuard) -> Self { Self { shutdown, guard: Some(guard) } } diff --git a/crates/tokio-util/Cargo.toml b/crates/tokio-util/Cargo.toml index ccace030c0f7..3a8ad768d59b 100644 --- a/crates/tokio-util/Cargo.toml +++ b/crates/tokio-util/Cargo.toml @@ -19,4 +19,7 @@ tokio = { workspace = true, features = ["sync"] } tokio-stream = { workspace = true, features = ["sync"] } [dev-dependencies] -tokio = { workspace = true, features = ["full", "macros"] } \ No newline at end of file +tokio = { workspace = true, features = ["full", "macros"] } + +[features] +time = ["tokio/time"] \ No newline at end of file diff --git a/crates/tokio-util/src/lib.rs b/crates/tokio-util/src/lib.rs index 2053bf60bc56..e476c4063d9e 100644 --- a/crates/tokio-util/src/lib.rs +++ b/crates/tokio-util/src/lib.rs @@ -12,3 +12,6 @@ mod event_sender; mod event_stream; pub use event_sender::EventSender; pub use event_stream::EventStream; + +#[cfg(feature = "time")] +pub mod ratelimit; diff --git a/crates/net/common/src/ratelimit.rs b/crates/tokio-util/src/ratelimit.rs similarity index 100% rename from crates/net/common/src/ratelimit.rs rename to crates/tokio-util/src/ratelimit.rs diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index e742c569b76a..77edd6f3e541 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -13,11 +13,14 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-eth-wire-types.workspace = true reth-primitives.workspace = true +reth-execution-types.workspace = true reth-fs-util.workspace = 
true reth-provider.workspace = true reth-tasks.workspace = true +revm.workspace = true # ethereum alloy-rlp.workspace = true @@ -42,12 +45,12 @@ serde = { workspace = true, features = ["derive", "rc"], optional = true } bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true -itertools.workspace = true # testing rand = { workspace = true, optional = true } paste = { workspace = true, optional = true } proptest = { workspace = true, optional = true } +proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } @@ -56,6 +59,7 @@ reth-tracing.workspace = true paste.workspace = true rand.workspace = true proptest.workspace = true +proptest-arbitrary-interop.workspace = true criterion.workspace = true pprof = { workspace = true, features = ["criterion", "flamegraph"] } assert_matches.workspace = true @@ -66,7 +70,7 @@ serde_json.workspace = true default = ["serde"] serde = ["dep:serde"] test-utils = ["rand", "paste", "serde"] -arbitrary = ["proptest", "reth-primitives/arbitrary"] +arbitrary = ["proptest", "reth-primitives/arbitrary", "proptest-arbitrary-interop"] [[bench]] name = "truncate" diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 7c56b710450c..5f88d515d32e 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -79,7 +79,7 @@ //! Listen for new transactions and print them: //! //! ``` -//! use reth_primitives::MAINNET; +//! use reth_chainspec::MAINNET; //! use reth_provider::{BlockReaderIdExt, ChainSpecProvider, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool, TransactionPool}; @@ -107,7 +107,7 @@ //! //! ``` //! use futures_util::Stream; -//! use reth_primitives::MAINNET; +//! use reth_chainspec::MAINNET; //! 
use reth_provider::{BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; //! use reth_tasks::TaskSpawner; @@ -148,7 +148,6 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![warn(clippy::missing_const_for_fn)] use crate::{identifier::TransactionId, pool::PoolInner}; use aquamarine as _; @@ -285,7 +284,7 @@ where /// # Example /// /// ``` - /// use reth_primitives::MAINNET; + /// use reth_chainspec::MAINNET; /// use reth_provider::{BlockReaderIdExt, StateProviderFactory}; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::{ diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index cb44f5af28e1..3acfae135ddf 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -11,6 +11,7 @@ use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, }; +use reth_execution_types::ExecutionOutcome; use reth_fs_util::FsPathError; use reth_primitives::{ Address, BlockHash, BlockNumber, BlockNumberOrTag, FromRecoveredPooledTransaction, @@ -18,7 +19,7 @@ use reth_primitives::{ TryFromRecoveredTransaction, }; use reth_provider::{ - BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, ExecutionOutcome, ProviderError, + BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, ProviderError, StateProviderFactory, }; use reth_tasks::TaskSpawner; @@ -681,8 +682,9 @@ mod tests { blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, CoinbaseTipOrdering, EthPooledTransaction, Pool, PoolTransaction, TransactionOrigin, }; + use reth_chainspec::MAINNET; use reth_fs_util as fs; - use reth_primitives::{hex, PooledTransactionsElement, MAINNET, U256}; + use reth_primitives::{hex, PooledTransactionsElement, U256}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use 
reth_tasks::TaskManager; diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index 90d46854d43c..c75e3403cbd5 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -104,4 +104,10 @@ pub struct AllTransactionsMetrics { pub(crate) all_transactions_by_id: Gauge, /// Number of all transactions by all senders in the pool pub(crate) all_transactions_by_all_senders: Gauge, + /// Number of blob transactions nonce gaps. + pub(crate) blob_transactions_nonce_gaps: Counter, + /// The current blob base fee + pub(crate) blob_base_fee: Gauge, + /// The current base fee + pub(crate) base_fee: Gauge, } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 2f40223af7c9..74f3fa8a4bc6 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -49,18 +49,16 @@ impl Iterator for BestTransactionsWithFees { // find the next transaction that satisfies the base fee loop { let best = self.best.next()?; - if best.transaction.max_fee_per_gas() < self.base_fee as u128 { - // tx violates base fee, mark it as invalid and continue - crate::traits::BestTransactions::mark_invalid(self, &best); - } else { - // tx is EIP4844 and violates blob fee, mark it as invalid and continue - if best.transaction.max_fee_per_blob_gas().is_some_and(|max_fee_per_blob_gas| { - max_fee_per_blob_gas < self.base_fee_per_blob_gas as u128 - }) { - crate::traits::BestTransactions::mark_invalid(self, &best); - continue; - }; + // If both the base fee and blob fee (if applicable for EIP-4844) are satisfied, return + // the transaction + if best.transaction.max_fee_per_gas() >= self.base_fee as u128 && + best.transaction + .max_fee_per_blob_gas() + .map_or(true, |fee| fee >= self.base_fee_per_blob_gas as u128) + { return Some(best); + } else { + crate::traits::BestTransactions::mark_invalid(self, &best); } } } @@ -270,7 +268,9 @@ mod tests { use 
crate::{ pool::pending::PendingPool, test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, + Priority, }; + use reth_primitives::U256; #[test] fn test_best_iter() { @@ -321,4 +321,279 @@ mod tests { // iterator is empty assert!(best.next().is_none()); } + + #[test] + fn test_best_with_fees_iter_base_fee_satisfied() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 5; + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 15; + + // Insert transactions with a max_fee_per_gas greater than or equal to the base fee + // Without blob fee + for nonce in 0..num_tx { + let tx = MockTransaction::eip1559() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + for nonce in 0..num_tx { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.nonce(), nonce); + assert!(tx.transaction.max_fee_per_gas() >= base_fee as u128); + } + } + + #[test] + fn test_best_with_fees_iter_base_fee_violated() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 5; + let base_fee: u64 = 20; + let base_fee_per_blob_gas: u64 = 15; + + // Insert transactions with a max_fee_per_gas less than the base fee + for nonce in 0..num_tx { + let tx = MockTransaction::eip1559() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 - 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // No transaction should be returned since all violate the base fee + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_blob_fee_satisfied() { + let mut pool = 
PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 5; + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 20; + + // Insert transactions with a max_fee_per_blob_gas greater than or equal to the base fee per + // blob gas + for nonce in 0..num_tx { + let tx = MockTransaction::eip4844() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 + 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // All transactions should be returned in order since they satisfy both base fee and blob + // fee + for nonce in 0..num_tx { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.nonce(), nonce); + assert!(tx.transaction.max_fee_per_gas() >= base_fee as u128); + assert!( + tx.transaction.max_fee_per_blob_gas().unwrap() >= base_fee_per_blob_gas as u128 + ); + } + + // No more transactions should be returned + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_blob_fee_violated() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 5; + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 20; + + // Insert transactions with a max_fee_per_blob_gas less than the base fee per blob gas + for nonce in 0..num_tx { + let tx = MockTransaction::eip4844() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 - 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // No transaction should be returned since all violate the blob fee + assert!(best.next().is_none()); + } + + #[test] + fn 
test_best_with_fees_iter_mixed_fees() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 20; + + // Insert transactions with varying max_fee_per_gas and max_fee_per_blob_gas + let tx1 = + MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(base_fee as u128 + 5); + let tx2 = MockTransaction::eip4844() + .rng_hash() + .with_nonce(1) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 + 5); + let tx3 = MockTransaction::eip4844() + .rng_hash() + .with_nonce(2) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 - 5); + let tx4 = + MockTransaction::eip1559().rng_hash().with_nonce(3).with_max_fee(base_fee as u128 - 5); + + pool.add_transaction(Arc::new(f.validated(tx1.clone())), 0); + pool.add_transaction(Arc::new(f.validated(tx2.clone())), 0); + pool.add_transaction(Arc::new(f.validated(tx3)), 0); + pool.add_transaction(Arc::new(f.validated(tx4)), 0); + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + let expected_order = vec![tx1, tx2]; + for expected_tx in expected_order { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, expected_tx); + } + + // No more transactions should be returned + assert!(best.next().is_none()); + } + + #[test] + fn test_best_add_transaction_with_next_nonce() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add 5 transactions with increasing nonces to the pool + let num_tx = 5; + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + // Create a BestTransactions iterator from the pool + let mut best = pool.best(); + + // Use a broadcast channel for 
transaction updates + let (tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // Create a new transaction with nonce 5 and validate it + let new_tx = MockTransaction::eip1559().rng_hash().with_nonce(5); + let valid_new_tx = f.validated(new_tx); + + // Send the new transaction through the broadcast channel + let pending_tx = PendingTransaction { + submission_id: 10, + transaction: Arc::new(valid_new_tx.clone()), + priority: Priority::Value(U256::from(1000)), + }; + tx_sender.send(pending_tx.clone()).unwrap(); + + // Add new transactions to the iterator + best.add_new_transactions(); + + // Verify that the new transaction has been added to the 'all' map + assert_eq!(best.all.len(), 6); + assert!(best.all.contains_key(valid_new_tx.id())); + + // Verify that the new transaction has been added to the 'independent' set + assert_eq!(best.independent.len(), 2); + assert!(best.independent.contains(&pending_tx)); + } + + #[test] + fn test_best_add_transaction_with_ancestor() { + // Initialize a new PendingPool with default MockOrdering and MockTransactionFactory + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add 5 transactions with increasing nonces to the pool + let num_tx = 5; + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + // Create a BestTransactions iterator from the pool + let mut best = pool.best(); + + // Use a broadcast channel for transaction updates + let (tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // Create a new transaction with nonce 5 and validate it + let base_tx1 = MockTransaction::eip1559().rng_hash().with_nonce(5); + let valid_new_tx1 = f.validated(base_tx1.clone()); + + // 
Send the new transaction through the broadcast channel + let pending_tx1 = PendingTransaction { + submission_id: 10, + transaction: Arc::new(valid_new_tx1.clone()), + priority: Priority::Value(U256::from(1000)), + }; + tx_sender.send(pending_tx1.clone()).unwrap(); + + // Add new transactions to the iterator + best.add_new_transactions(); + + // Verify that the new transaction has been added to the 'all' map + assert_eq!(best.all.len(), 6); + assert!(best.all.contains_key(valid_new_tx1.id())); + + // Verify that the new transaction has been added to the 'independent' set + assert_eq!(best.independent.len(), 2); + assert!(best.independent.contains(&pending_tx1)); + + // Attempt to add a new transaction with a different nonce (not a duplicate) + let base_tx2 = base_tx1.with_nonce(6); + let valid_new_tx2 = f.validated(base_tx2); + + // Send the new transaction through the broadcast channel + let pending_tx2 = PendingTransaction { + submission_id: 11, // Different submission ID + transaction: Arc::new(valid_new_tx2.clone()), + priority: Priority::Value(U256::from(1000)), + }; + tx_sender.send(pending_tx2.clone()).unwrap(); + + // Add new transactions to the iterator + best.add_new_transactions(); + + // Verify that the new transaction has been added to 'all' + assert_eq!(best.all.len(), 7); + assert!(best.all.contains_key(valid_new_tx2.id())); + + // Verify that the new transaction has not been added to the 'independent' set + assert_eq!(best.independent.len(), 2); + assert!(!best.independent.contains(&pending_tx2)); + } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 2238a5ae8e92..67be88d868f8 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -18,7 +18,6 @@ use crate::{ PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; -use itertools::Itertools; use reth_primitives::{ constants::{ 
eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, @@ -983,9 +982,13 @@ impl AllTransactions { } = block_info; self.last_seen_block_number = last_seen_block_number; self.last_seen_block_hash = last_seen_block_hash; + self.pending_fees.base_fee = pending_basefee; + self.metrics.base_fee.set(pending_basefee as f64); + if let Some(pending_blob_fee) = pending_blob_fee { self.pending_fees.blob_fee = pending_blob_fee; + self.metrics.blob_base_fee.set(pending_blob_fee as f64); } } @@ -1336,12 +1339,14 @@ impl AllTransactions { if let Some(ancestor) = ancestor { let Some(ancestor_tx) = self.txs.get(&ancestor) else { // ancestor tx is missing, so we can't insert the new blob - return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) }); + self.metrics.blob_transactions_nonce_gaps.increment(1); + return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) }) }; if ancestor_tx.state.has_nonce_gap() { // the ancestor transaction already has a nonce gap, so we can't insert the new // blob - return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) }); + self.metrics.blob_transactions_nonce_gaps.increment(1); + return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) }) } // the max cost executing this transaction requires @@ -1800,35 +1805,6 @@ impl Default for UpdateOutcome { } } -/// Represents the outcome of a prune -pub struct PruneResult { - /// A list of added transactions that a pruned marker satisfied - pub promoted: Vec>, - /// all transactions that failed to be promoted and now are discarded - pub failed: Vec, - /// all transactions that were pruned from the ready pool - pub pruned: Vec>>, -} - -impl fmt::Debug for PruneResult { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PruneResult") - .field( - "promoted", - &format_args!("[{}]", self.promoted.iter().map(|tx| tx.hash()).format(", ")), - ) - .field("failed", &self.failed) - 
.field( - "pruned", - &format_args!( - "[{}]", - self.pruned.iter().map(|tx| tx.transaction.hash()).format(", ") - ), - ) - .finish() - } -} - /// Stores relevant context about a sender. #[derive(Debug, Clone, Default)] pub(crate) struct SenderInfo { diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 0b981ea155d6..2be4d8aa9a9a 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,9 +1,10 @@ use crate::EthPooledTransaction; use rand::Rng; +use reth_chainspec::MAINNET; use reth_primitives::{ constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Address, Bytes, Transaction, TransactionSigned, TryFromRecoveredTransaction, TxEip1559, TxEip4844, TxKind, TxLegacy, B256, - MAINNET, U256, + U256, }; /// A generator for transactions for testing purposes. @@ -225,13 +226,13 @@ impl TransactionBuilder { } /// Increments the nonce value of the transaction builder by 1. - pub fn inc_nonce(mut self) -> Self { + pub const fn inc_nonce(mut self) -> Self { self.nonce += 1; self } /// Decrements the nonce value of the transaction builder by 1, avoiding underflow. 
- pub fn decr_nonce(mut self) -> Self { + pub const fn decr_nonce(mut self) -> Self { self.nonce = self.nonce.saturating_sub(1); self } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 04388df4fd1f..2e12c89bc035 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -987,9 +987,10 @@ impl From for Transaction { impl proptest::arbitrary::Arbitrary for MockTransaction { type Parameters = (); fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - use proptest::prelude::{any, Strategy}; + use proptest::prelude::Strategy; + use proptest_arbitrary_interop::arb; - any::<(Transaction, Address, B256)>() + arb::<(Transaction, Address, B256)>() .prop_map(|(tx, sender, tx_hash)| match &tx { Transaction::Legacy(TxLegacy { chain_id, @@ -1447,7 +1448,7 @@ pub struct MockTransactionSet { impl MockTransactionSet { /// Create a new [`MockTransactionSet`] from a list of transactions - fn new(transactions: Vec) -> Self { + const fn new(transactions: Vec) -> Self { Self { transactions } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index c9709455ee78..c8a24341b283 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -39,7 +39,7 @@ pub type PeerId = reth_primitives::B512; /// /// Note: This requires `Clone` for convenience, since it is assumed that this will be implemented /// for a wrapped `Arc` type, see also [`Pool`](crate::Pool). -#[auto_impl::auto_impl(Arc)] +#[auto_impl::auto_impl(&, Arc)] pub trait TransactionPool: Send + Sync + Clone { /// The transaction type of the pool type Transaction: PoolTransaction; @@ -388,7 +388,7 @@ pub trait TransactionPool: Send + Sync + Clone { } /// Extension for [TransactionPool] trait that allows to set the current block info. 
-#[auto_impl::auto_impl(Arc)] +#[auto_impl::auto_impl(&, Arc)] pub trait TransactionPoolExt: TransactionPool { /// Sets the current block info for the pool. fn set_block_info(&self, info: BlockInfo); diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 53e725a31ebe..3690513d9c1e 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -9,18 +9,18 @@ use crate::{ EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, PoolTransaction, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_primitives::{ - constants::{ - eip4844::{MAINNET_KZG_TRUSTED_SETUP, MAX_BLOBS_PER_BLOCK}, - ETHEREUM_BLOCK_GAS_LIMIT, - }, - kzg::KzgSettings, - revm::compat::calculate_intrinsic_gas_after_merge, - ChainSpec, GotExpected, InvalidTransactionError, SealedBlock, EIP1559_TX_TYPE_ID, + constants::{eip4844::MAX_BLOBS_PER_BLOCK, ETHEREUM_BLOCK_GAS_LIMIT}, + GotExpected, InvalidTransactionError, SealedBlock, TxKind, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; use reth_provider::{AccountReader, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; +use revm::{ + interpreter::gas::validate_initial_tx_gas, + primitives::{AccessListItem, EnvKzgSettings, SpecId}, +}; use std::{ marker::PhantomData, sync::{atomic::AtomicBool, Arc}, @@ -124,7 +124,7 @@ pub(crate) struct EthTransactionValidatorInner { /// Minimum priority fee to enforce for acceptance into the pool. minimum_priority_fee: Option, /// Stores the setup and parameters needed for validating KZG proofs. - kzg_settings: Arc, + kzg_settings: EnvKzgSettings, /// How to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions. 
local_transactions_config: LocalTransactionConfig, /// Maximum size in bytes a single transaction can have in order to be accepted into the pool. @@ -368,7 +368,7 @@ where } EthBlobTransactionSidecar::Present(blob) => { // validate the blob - if let Err(err) = transaction.validate_blob(&blob, &self.kzg_settings) { + if let Err(err) = transaction.validate_blob(&blob, self.kzg_settings.get()) { return TransactionValidationOutcome::Invalid( transaction, InvalidPoolTransactionError::Eip4844( @@ -434,7 +434,7 @@ pub struct EthTransactionValidatorBuilder { additional_tasks: usize, /// Stores the setup and parameters needed for validating KZG proofs. - kzg_settings: Arc, + kzg_settings: EnvKzgSettings, /// How to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions. local_transactions_config: LocalTransactionConfig, /// Max size in bytes of a single transaction allowed @@ -456,7 +456,7 @@ impl EthTransactionValidatorBuilder { block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, minimum_priority_fee: None, additional_tasks: 1, - kzg_settings: Arc::clone(&MAINNET_KZG_TRUSTED_SETUP), + kzg_settings: EnvKzgSettings::Default, local_transactions_config: Default::default(), max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, @@ -537,8 +537,8 @@ impl EthTransactionValidatorBuilder { self } - /// Sets the [`KzgSettings`] to use for validating KZG proofs. - pub fn kzg_settings(mut self, kzg_settings: Arc) -> Self { + /// Sets the [`EnvKzgSettings`] to use for validating KZG proofs. 
+ pub fn kzg_settings(mut self, kzg_settings: EnvKzgSettings) -> Self { self.kzg_settings = kzg_settings; self } @@ -712,12 +712,11 @@ pub fn ensure_intrinsic_gas( transaction: &T, is_shanghai: bool, ) -> Result<(), InvalidPoolTransactionError> { - let access_list = transaction.access_list().map(|list| list.flattened()).unwrap_or_default(); if transaction.gas_limit() < calculate_intrinsic_gas_after_merge( transaction.input(), &transaction.kind(), - &access_list, + transaction.access_list().map(|list| list.0.as_slice()).unwrap_or(&[]), is_shanghai, ) { @@ -727,6 +726,20 @@ pub fn ensure_intrinsic_gas( } } +/// Calculates the Intrinsic Gas usage for a Transaction +/// +/// Caution: This only checks past the Merge hardfork. +#[inline] +pub fn calculate_intrinsic_gas_after_merge( + input: &[u8], + kind: &TxKind, + access_list: &[AccessListItem], + is_shanghai: bool, +) -> u64 { + let spec_id = if is_shanghai { SpecId::SHANGHAI } else { SpecId::MERGE }; + validate_initial_tx_gas(spec_id, input, kind.is_create(), access_list, 0) +} + #[cfg(test)] mod tests { use super::*; @@ -734,9 +747,8 @@ mod tests { blobstore::InMemoryBlobStore, error::PoolErrorKind, CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionPool, }; - use reth_primitives::{ - hex, FromRecoveredPooledTransaction, PooledTransactionsElement, MAINNET, U256, - }; + use reth_chainspec::MAINNET; + use reth_primitives::{hex, FromRecoveredPooledTransaction, PooledTransactionsElement, U256}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; fn get_transaction() -> EthPooledTransaction { diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 31bd997f1ac3..4051230d618a 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -7,7 +7,8 @@ use crate::{ TransactionValidator, }; use futures_util::{lock::Mutex, StreamExt}; -use reth_primitives::{ChainSpec, SealedBlock}; +use 
reth_chainspec::ChainSpec; +use reth_primitives::SealedBlock; use reth_provider::BlockReaderIdExt; use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; diff --git a/crates/trie/types/Cargo.toml b/crates/trie/common/Cargo.toml similarity index 55% rename from crates/trie/types/Cargo.toml rename to crates/trie/common/Cargo.toml index 4dc2f15dc99f..da5d5a828cbf 100644 --- a/crates/trie/types/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "reth-trie-types" +name = "reth-trie-common" version.workspace = true edition.workspace = true homepage.workspace = true @@ -12,22 +12,42 @@ description = "Commonly used types for trie usage in reth." workspace = true [dependencies] +reth-primitives-traits.workspace = true reth-codecs.workspace = true alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie = { workspace = true, features = ["serde"] } +alloy-consensus.workspace = true +alloy-genesis.workspace = true +revm-primitives.workspace = true + bytes.workspace = true derive_more.workspace = true serde.workspace = true - +itertools.workspace = true nybbles = { workspace = true, features = ["serde", "rlp"] } +# `test-utils` feature +hash-db = { version = "=0.15.2", optional = true } +plain_hasher = { version = "0.2", optional = true } +arbitrary = { workspace = true, features = ["derive"], optional = true } + [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true +proptest-arbitrary-interop.workspace = true proptest-derive.workspace = true serde_json.workspace = true test-fuzz.workspace = true -toml.workspace = true \ No newline at end of file +toml.workspace = true +hash-db = "=0.15.2" +plain_hasher = "0.2" + +[features] +test-utils = ["dep:plain_hasher", "dep:hash-db", "arbitrary"] +arbitrary = [ + "alloy-trie/arbitrary", + "dep:arbitrary", +] diff --git a/crates/trie/common/src/account.rs 
b/crates/trie/common/src/account.rs new file mode 100644 index 000000000000..64860ab78b31 --- /dev/null +++ b/crates/trie/common/src/account.rs @@ -0,0 +1,73 @@ +use crate::root::storage_root_unhashed; +use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_genesis::GenesisAccount; +use alloy_primitives::{keccak256, B256, U256}; +use alloy_rlp::{RlpDecodable, RlpEncodable}; +use alloy_trie::EMPTY_ROOT_HASH; +use reth_primitives_traits::Account; +use revm_primitives::AccountInfo; + +/// An Ethereum account as represented in the trie. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable)] +pub struct TrieAccount { + /// Account nonce. + pub nonce: u64, + /// Account balance. + pub balance: U256, + /// Account's storage root. + pub storage_root: B256, + /// Hash of the account's bytecode. + pub code_hash: B256, +} + +impl TrieAccount { + /// Get account's storage root. + pub const fn storage_root(&self) -> B256 { + self.storage_root + } +} + +impl From for TrieAccount { + fn from(account: GenesisAccount) -> Self { + let storage_root = account + .storage + .map(|storage| { + storage_root_unhashed( + storage + .into_iter() + .filter(|(_, value)| *value != B256::ZERO) + .map(|(slot, value)| (slot, U256::from_be_bytes(*value))), + ) + }) + .unwrap_or(EMPTY_ROOT_HASH); + + Self { + nonce: account.nonce.unwrap_or_default(), + balance: account.balance, + storage_root, + code_hash: account.code.map_or(KECCAK_EMPTY, keccak256), + } + } +} + +impl From<(Account, B256)> for TrieAccount { + fn from((account, storage_root): (Account, B256)) -> Self { + Self { + nonce: account.nonce, + balance: account.balance, + storage_root, + code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + } + } +} + +impl From<(AccountInfo, B256)> for TrieAccount { + fn from((account, storage_root): (AccountInfo, B256)) -> Self { + Self { + nonce: account.nonce, + balance: account.balance, + storage_root, + code_hash: account.code_hash, + } + } +} diff --git 
a/crates/trie/types/src/hash_builder/mod.rs b/crates/trie/common/src/hash_builder/mod.rs similarity index 100% rename from crates/trie/types/src/hash_builder/mod.rs rename to crates/trie/common/src/hash_builder/mod.rs diff --git a/crates/trie/types/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs similarity index 97% rename from crates/trie/types/src/hash_builder/state.rs rename to crates/trie/common/src/hash_builder/state.rs index c1fa9640fca5..c70d7817e4c8 100644 --- a/crates/trie/types/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -148,6 +148,7 @@ impl Compact for HashBuilderState { mod tests { use super::*; use proptest::prelude::*; + use proptest_arbitrary_interop::arb; #[test] fn hash_builder_state_regression() { @@ -161,7 +162,7 @@ mod tests { proptest! { #[test] - fn hash_builder_state_roundtrip(state: HashBuilderState) { + fn hash_builder_state_roundtrip(state in arb::()) { let mut buf = vec![]; let len = state.clone().to_compact(&mut buf); let (decoded, _) = HashBuilderState::from_compact(&buf, len); diff --git a/crates/trie/types/src/hash_builder/value.rs b/crates/trie/common/src/hash_builder/value.rs similarity index 100% rename from crates/trie/types/src/hash_builder/value.rs rename to crates/trie/common/src/hash_builder/value.rs diff --git a/crates/trie/types/src/lib.rs b/crates/trie/common/src/lib.rs similarity index 86% rename from crates/trie/types/src/lib.rs rename to crates/trie/common/src/lib.rs index 0b4927b9c8ac..f845c9ca5cd6 100644 --- a/crates/trie/types/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] /// The implementation of hash builder. 
@@ -31,4 +29,11 @@ pub use storage::StorageTrieEntry; mod subnode; pub use subnode::StoredSubNode; +mod proofs; +#[cfg(any(test, feature = "test-utils"))] +pub use proofs::triehash; +pub use proofs::{AccountProof, StorageProof}; + +pub mod root; + pub use alloy_trie::{proof, BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH}; diff --git a/crates/trie/types/src/mask.rs b/crates/trie/common/src/mask.rs similarity index 100% rename from crates/trie/types/src/mask.rs rename to crates/trie/common/src/mask.rs diff --git a/crates/trie/types/src/nibbles.rs b/crates/trie/common/src/nibbles.rs similarity index 100% rename from crates/trie/types/src/nibbles.rs rename to crates/trie/common/src/nibbles.rs diff --git a/crates/trie/types/src/nodes/branch.rs b/crates/trie/common/src/nodes/branch.rs similarity index 100% rename from crates/trie/types/src/nodes/branch.rs rename to crates/trie/common/src/nodes/branch.rs diff --git a/crates/trie/types/src/nodes/mod.rs b/crates/trie/common/src/nodes/mod.rs similarity index 100% rename from crates/trie/types/src/nodes/mod.rs rename to crates/trie/common/src/nodes/mod.rs diff --git a/crates/primitives/src/proofs/types.rs b/crates/trie/common/src/proofs.rs similarity index 79% rename from crates/primitives/src/proofs/types.rs rename to crates/trie/common/src/proofs.rs index f2225df7942e..11953a48decf 100644 --- a/crates/primitives/src/proofs/types.rs +++ b/crates/trie/common/src/proofs.rs @@ -1,12 +1,13 @@ //! Merkle trie proofs. -use super::{traits::IntoTrieAccount, Nibbles}; -use crate::{keccak256, Account, Address, Bytes, B256, U256}; +use crate::{Nibbles, TrieAccount}; +use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rlp::encode_fixed_size; use alloy_trie::{ proof::{verify_proof, ProofVerificationError}, EMPTY_ROOT_HASH, }; +use reth_primitives_traits::Account; /// The merkle proof with the relevant account info. 
#[derive(PartialEq, Eq, Debug)] @@ -26,7 +27,7 @@ pub struct AccountProof { impl AccountProof { /// Create new account proof entity. - pub fn new(address: Address) -> Self { + pub const fn new(address: Address) -> Self { Self { address, info: None, @@ -64,7 +65,7 @@ impl AccountProof { let expected = if self.info.is_none() && self.storage_root == EMPTY_ROOT_HASH { None } else { - Some(alloy_rlp::encode(IntoTrieAccount::to_trie_account(( + Some(alloy_rlp::encode(TrieAccount::from(( self.info.unwrap_or_default(), self.storage_root, )))) @@ -122,3 +123,29 @@ impl StorageProof { verify_proof(root, self.nibbles.clone(), expected, &self.proof) } } + +/// Implementation of hasher using our keccak256 hashing function +/// for compatibility with `triehash` crate. +#[cfg(any(test, feature = "test-utils"))] +pub mod triehash { + use alloy_primitives::{keccak256, B256}; + use hash_db::Hasher; + use plain_hasher::PlainHasher; + + /// A [Hasher] that calculates a keccak256 hash of the given data. + #[derive(Default, Debug, Clone, PartialEq, Eq)] + #[non_exhaustive] + pub struct KeccakHasher; + + #[cfg(any(test, feature = "test-utils"))] + impl Hasher for KeccakHasher { + type Out = B256; + type StdHasher = PlainHasher; + + const LENGTH: usize = 32; + + fn hash(x: &[u8]) -> Self::Out { + keccak256(x) + } + } +} diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs new file mode 100644 index 000000000000..434eea20b59c --- /dev/null +++ b/crates/trie/common/src/root.rs @@ -0,0 +1,117 @@ +//! Common root computation functions. + +use crate::TrieAccount; +use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_rlp::Encodable; +use alloy_trie::HashBuilder; +use itertools::Itertools; +use nybbles::Nibbles; + +/// Adjust the index of an item for rlp encoding. 
+pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize { + if i > 0x7f { + i + } else if i == 0x7f || i + 1 == len { + 0 + } else { + i + 1 + } +} + +/// Compute a trie root of the collection of rlp encodable items. +pub fn ordered_trie_root(items: &[T]) -> B256 { + ordered_trie_root_with_encoder(items, |item, buf| item.encode(buf)) +} + +/// Compute a trie root of the collection of items with a custom encoder. +pub fn ordered_trie_root_with_encoder(items: &[T], mut encode: F) -> B256 +where + F: FnMut(&T, &mut Vec), +{ + let mut value_buffer = Vec::new(); + + let mut hb = HashBuilder::default(); + let items_len = items.len(); + for i in 0..items_len { + let index = adjust_index_for_rlp(i, items_len); + + let index_buffer = alloy_rlp::encode_fixed_size(&index); + + value_buffer.clear(); + encode(&items[index], &mut value_buffer); + + hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer); + } + + hb.root() +} + +/// Hashes and sorts account keys, then proceeds to calculating the root hash of the state +/// represented as MPT. +/// See [`state_root_unsorted`] for more info. +pub fn state_root_ref_unhashed<'a, A: Into + Clone + 'a>( + state: impl IntoIterator, +) -> B256 { + state_root_unsorted( + state.into_iter().map(|(address, account)| (keccak256(address), account.clone())), + ) +} + +/// Hashes and sorts account keys, then proceeds to calculating the root hash of the state +/// represented as MPT. +/// See [`state_root_unsorted`] for more info. +pub fn state_root_unhashed>( + state: impl IntoIterator, +) -> B256 { + state_root_unsorted(state.into_iter().map(|(address, account)| (keccak256(address), account))) +} + +/// Sorts the hashed account keys and calculates the root hash of the state represented as MPT. +/// See [`state_root`] for more info. +pub fn state_root_unsorted>( + state: impl IntoIterator, +) -> B256 { + state_root(state.into_iter().sorted_by_key(|(key, _)| *key)) +} + +/// Calculates the root hash of the state represented as MPT. 
+/// Corresponds to [geth's `deriveHash`](https://github.com/ethereum/go-ethereum/blob/6c149fd4ad063f7c24d726a73bc0546badd1bc73/core/genesis.go#L119). +/// +/// # Panics +/// +/// If the items are not in sorted order. +pub fn state_root>(state: impl IntoIterator) -> B256 { + let mut hb = HashBuilder::default(); + let mut account_rlp_buf = Vec::new(); + for (hashed_key, account) in state { + account_rlp_buf.clear(); + account.into().encode(&mut account_rlp_buf); + hb.add_leaf(Nibbles::unpack(hashed_key), &account_rlp_buf); + } + hb.root() +} + +/// Hashes storage keys, sorts them and them calculates the root hash of the storage trie. +/// See [`storage_root_unsorted`] for more info. +pub fn storage_root_unhashed(storage: impl IntoIterator) -> B256 { + storage_root_unsorted(storage.into_iter().map(|(slot, value)| (keccak256(slot), value))) +} + +/// Sorts and calculates the root hash of account storage trie. +/// See [`storage_root`] for more info. +pub fn storage_root_unsorted(storage: impl IntoIterator) -> B256 { + storage_root(storage.into_iter().sorted_by_key(|(key, _)| *key)) +} + +/// Calculates the root hash of account storage trie. +/// +/// # Panics +/// +/// If the items are not in sorted order. 
+pub fn storage_root(storage: impl IntoIterator) -> B256 { + let mut hb = HashBuilder::default(); + for (hashed_slot, value) in storage { + hb.add_leaf(Nibbles::unpack(hashed_slot), alloy_rlp::encode_fixed_size(&value).as_ref()); + } + hb.root() +} diff --git a/crates/trie/types/src/storage.rs b/crates/trie/common/src/storage.rs similarity index 100% rename from crates/trie/types/src/storage.rs rename to crates/trie/common/src/storage.rs diff --git a/crates/trie/types/src/subnode.rs b/crates/trie/common/src/subnode.rs similarity index 100% rename from crates/trie/types/src/subnode.rs rename to crates/trie/common/src/subnode.rs diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 6a55091415d8..36b7cbdc4a28 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -54,6 +54,7 @@ tokio = { workspace = true, default-features = false, features = ["sync", "rt", rayon.workspace = true criterion = { workspace = true, features = ["async_tokio"] } proptest.workspace = true +proptest-arbitrary-interop.workspace = true [features] default = ["metrics", "async", "parallel"] diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index eba1cdd3daaf..6a7d7a81cc37 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -1,6 +1,7 @@ #![allow(missing_docs, unreachable_pub)] use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; +use proptest_arbitrary_interop::arb; use rayon::ThreadPoolBuilder; use reth_primitives::{Account, B256, U256}; use reth_provider::{ @@ -29,7 +30,7 @@ pub fn calculate_state_root(c: &mut Criterion) { HashedStateChanges(db_state).write_to_db(provider_rw.tx_ref()).unwrap(); let (_, updates) = StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap(); - updates.flush(provider_rw.tx_ref()).unwrap(); + 
updates.write_to_database(provider_rw.tx_ref()).unwrap(); provider_rw.commit().unwrap(); } @@ -40,7 +41,7 @@ pub fn calculate_state_root(c: &mut Criterion) { b.to_async(&runtime).iter_with_setup( || { let sorted_state = updated_state.clone().into_sorted(); - let prefix_sets = updated_state.construct_prefix_sets(); + let prefix_sets = updated_state.construct_prefix_sets().freeze(); let provider = provider_factory.provider().unwrap(); (provider, sorted_state, prefix_sets) }, @@ -82,7 +83,7 @@ fn generate_test_data(size: usize) -> (HashedPostState, HashedPostState) { let db_state = hash_map( any::(), ( - any::().prop_filter("non empty account", |a| !a.is_empty()), + arb::().prop_filter("non empty account", |a| !a.is_empty()), hash_map( any::(), any::().prop_filter("non zero value", |v| !v.is_zero()), diff --git a/crates/trie/parallel/src/async_root.rs b/crates/trie/parallel/src/async_root.rs index 7441bd0a02f4..db6152b6a2cf 100644 --- a/crates/trie/parallel/src/async_root.rs +++ b/crates/trie/parallel/src/async_root.rs @@ -3,7 +3,7 @@ use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; use reth_db_api::database::Database; use reth_execution_errors::StorageRootError; -use reth_primitives::{proofs::IntoTrieAccount, B256}; +use reth_primitives::B256; use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; use reth_tasks::pool::BlockingTaskPool; use reth_trie::{ @@ -12,7 +12,7 @@ use reth_trie::{ trie_cursor::TrieCursorFactory, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, HashedPostState, Nibbles, StorageRoot, + HashBuilder, HashedPostState, Nibbles, StorageRoot, TrieAccount, }; use std::{collections::HashMap, sync::Arc}; use thiserror::Error; @@ -86,7 +86,7 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), AsyncStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let prefix_sets = self.hashed_state.construct_prefix_sets(); + let prefix_sets = 
self.hashed_state.construct_prefix_sets().freeze(); let storage_root_targets = StorageRootTargets::new( self.hashed_state.accounts.keys().copied(), prefix_sets.storage_prefix_sets, @@ -132,7 +132,7 @@ where trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, prefix_sets.account_prefix_set, ) - .with_updates(retain_updates); + .with_deletions_retained(retain_updates); let mut account_node_iter = TrieNodeIter::new( walker, hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, @@ -166,11 +166,11 @@ where }; if retain_updates { - trie_updates.extend(updates.into_iter()); + trie_updates.insert_storage_updates(hashed_address, updates); } account_rlp.clear(); - let account = IntoTrieAccount::to_trie_account((account, storage_root)); + let account = TrieAccount::from((account, storage_root)); account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); } @@ -179,7 +179,7 @@ where let root = hash_builder.root(); - trie_updates.finalize_state_updates( + trie_updates.finalize( account_node_iter.walker, hash_builder, prefix_sets.destroyed_accounts, diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index f814e2ff88cf..0983fd47e5a3 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -3,7 +3,7 @@ use alloy_rlp::{BufMut, Encodable}; use rayon::prelude::*; use reth_db_api::database::Database; use reth_execution_errors::StorageRootError; -use reth_primitives::{proofs::IntoTrieAccount, B256}; +use reth_primitives::B256; use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -11,7 +11,7 @@ use reth_trie::{ trie_cursor::TrieCursorFactory, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, HashedPostState, Nibbles, StorageRoot, + HashBuilder, 
HashedPostState, Nibbles, StorageRoot, TrieAccount, }; use std::collections::HashMap; use thiserror::Error; @@ -77,7 +77,7 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let prefix_sets = self.hashed_state.construct_prefix_sets(); + let prefix_sets = self.hashed_state.construct_prefix_sets().freeze(); let storage_root_targets = StorageRootTargets::new( self.hashed_state.accounts.keys().copied(), prefix_sets.storage_prefix_sets, @@ -116,7 +116,7 @@ where trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, prefix_sets.account_prefix_set, ) - .with_updates(retain_updates); + .with_deletions_retained(retain_updates); let mut account_node_iter = TrieNodeIter::new( walker, hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, @@ -148,11 +148,11 @@ where }; if retain_updates { - trie_updates.extend(updates.into_iter()); + trie_updates.insert_storage_updates(hashed_address, updates); } account_rlp.clear(); - let account = IntoTrieAccount::to_trie_account((account, storage_root)); + let account = TrieAccount::from((account, storage_root)); account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); } @@ -161,7 +161,7 @@ where let root = hash_builder.root(); - trie_updates.finalize_state_updates( + trie_updates.finalize( account_node_iter.walker, hash_builder, prefix_sets.destroyed_accounts, diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 99beeff07004..04b03014e33f 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -18,7 +18,7 @@ reth-execution-errors.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-stages-types.workspace = true -reth-trie-types.workspace = true +reth-trie-common.workspace = true revm.workspace = true @@ -40,18 +40,24 @@ metrics = { workspace = true, optional = true } # 
`test-utils` feature triehash = { version = "0.8", optional = true } +# `serde` feature +serde = { workspace = true, optional = true } + [dev-dependencies] # reth +reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-storage-errors.workspace = true +reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } # trie triehash = "0.8" # misc proptest.workspace = true +proptest-arbitrary-interop.workspace = true tokio = { workspace = true, default-features = false, features = [ "sync", "rt", @@ -65,7 +71,8 @@ criterion.workspace = true [features] metrics = ["reth-metrics", "dep:metrics"] -test-utils = ["triehash"] +serde = ["dep:serde"] +test-utils = ["triehash", "reth-trie-common/test-utils"] [[bench]] name = "prefix_set" @@ -74,3 +81,8 @@ harness = false [[bench]] name = "hash_post_state" harness = false + +[[bench]] +name = "trie_root" +required-features = ["test-utils"] +harness = false diff --git a/crates/trie/trie/benches/hash_post_state.rs b/crates/trie/trie/benches/hash_post_state.rs index dced866cf31f..636ce4462173 100644 --- a/crates/trie/trie/benches/hash_post_state.rs +++ b/crates/trie/trie/benches/hash_post_state.rs @@ -1,7 +1,7 @@ #![allow(missing_docs, unreachable_pub)] use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; -use reth_primitives::{keccak256, revm::compat::into_reth_acc, Address, B256, U256}; +use reth_primitives::{keccak256, Address, B256, U256}; use reth_trie::{HashedPostState, HashedStorage}; use revm::db::{states::BundleBuilder, BundleAccount}; use std::collections::HashMap; @@ -30,7 +30,7 @@ fn from_bundle_state_seq(state: &HashMap) -> HashedPostS for (address, account) in state { let hashed_address = keccak256(address); - 
this.accounts.insert(hashed_address, account.info.clone().map(into_reth_acc)); + this.accounts.insert(hashed_address, account.info.clone().map(Into::into)); let hashed_storage = HashedStorage::from_iter( account.status.was_destroyed(), diff --git a/crates/primitives/benches/trie_root.rs b/crates/trie/trie/benches/trie_root.rs similarity index 89% rename from crates/primitives/benches/trie_root.rs rename to crates/trie/trie/benches/trie_root.rs index b61e3aa8541d..3f7efecc3a1f 100644 --- a/crates/primitives/benches/trie_root.rs +++ b/crates/trie/trie/benches/trie_root.rs @@ -1,7 +1,9 @@ #![allow(missing_docs, unreachable_pub)] use criterion::{black_box, criterion_group, criterion_main, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; -use reth_primitives::{proofs::triehash::KeccakHasher, ReceiptWithBloom, B256}; +use proptest_arbitrary_interop::arb; +use reth_primitives::{ReceiptWithBloom, B256}; +use reth_trie::triehash::KeccakHasher; /// Benchmarks different implementations of the root calculation. 
pub fn trie_root_benchmark(c: &mut Criterion) { @@ -25,7 +27,7 @@ pub fn trie_root_benchmark(c: &mut Criterion) { } fn generate_test_data(size: usize) -> Vec { - prop::collection::vec(any::(), size) + prop::collection::vec(arb::(), size) .new_tree(&mut TestRunner::new(ProptestConfig::default())) .unwrap() .current() @@ -41,8 +43,7 @@ criterion_main!(benches); mod implementations { use super::*; use alloy_rlp::Encodable; - use reth_primitives::proofs::adjust_index_for_rlp; - use reth_trie_types::{HashBuilder, Nibbles}; + use reth_trie_common::{root::adjust_index_for_rlp, HashBuilder, Nibbles}; pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 { triehash::ordered_trie_root::(receipts.iter().map(|receipt| { diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs new file mode 100644 index 000000000000..1f14a462b1ef --- /dev/null +++ b/crates/trie/trie/src/forward_cursor.rs @@ -0,0 +1,51 @@ +/// The implementation of forward-only in memory cursor over the entries. +/// The cursor operates under the assumption that the supplied collection is pre-sorted. +#[derive(Debug)] +pub struct ForwardInMemoryCursor<'a, K, V> { + /// The reference to the pre-sorted collection of entries. + entries: &'a Vec<(K, V)>, + /// The index where cursor is currently positioned. + index: usize, +} + +impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> { + /// Create new forward cursor positioned at the beginning of the collection. + /// The cursor expects all of the entries have been sorted in advance. + pub const fn new(entries: &'a Vec<(K, V)>) -> Self { + Self { entries, index: 0 } + } + + /// Returns `true` if the cursor is empty, regardless of its position. + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } +} + +impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> +where + K: PartialOrd + Copy, + V: Copy, +{ + /// Advances the cursor forward while `comparator` returns `true` or until the collection is + /// exhausted. 
Returns the first entry for which `comparator` returns `false` or `None`. + fn advance_while_false(&mut self, comparator: impl Fn(&K) -> bool) -> Option<(K, V)> { + let mut entry = self.entries.get(self.index); + while entry.map_or(false, |entry| comparator(&entry.0)) { + self.index += 1; + entry = self.entries.get(self.index); + } + entry.copied() + } + + /// Returns the first entry from the current cursor position that's greater or equal to the + /// provided key. This method advances the cursor forward. + pub fn seek(&mut self, key: &K) -> Option<(K, V)> { + self.advance_while_false(|k| k < key) + } + + /// Returns the first entry from the current cursor position that's greater than the provided + /// key. This method advances the cursor forward. + pub fn first_after(&mut self, key: &K) -> Option<(K, V)> { + self.advance_while_false(|k| k <= key) + } +} diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 61b1e093b3d6..ac262f3d44fc 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -1,6 +1,11 @@ use super::{HashedCursor, HashedCursorFactory, HashedStorageCursor}; -use crate::state::HashedPostStateSorted; +use crate::{ + forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted, + HashedStorageSorted, +}; +use reth_db::DatabaseError; use reth_primitives::{Account, B256, U256}; +use std::collections::HashSet; /// The hashed cursor factory for the post state. 
#[derive(Debug, Clone)] @@ -20,39 +25,44 @@ impl<'a, CF: HashedCursorFactory> HashedCursorFactory for HashedPostStateCursorF type AccountCursor = HashedPostStateAccountCursor<'a, CF::AccountCursor>; type StorageCursor = HashedPostStateStorageCursor<'a, CF::StorageCursor>; - fn hashed_account_cursor(&self) -> Result { + fn hashed_account_cursor(&self) -> Result { let cursor = self.cursor_factory.hashed_account_cursor()?; - Ok(HashedPostStateAccountCursor::new(cursor, self.post_state)) + Ok(HashedPostStateAccountCursor::new(cursor, &self.post_state.accounts)) } fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result { let cursor = self.cursor_factory.hashed_storage_cursor(hashed_address)?; - Ok(HashedPostStateStorageCursor::new(cursor, self.post_state, hashed_address)) + Ok(HashedPostStateStorageCursor::new(cursor, self.post_state.storages.get(&hashed_address))) } } /// The cursor to iterate over post state hashed accounts and corresponding database entries. /// It will always give precedence to the data from the hashed post state. -#[derive(Debug, Clone)] -pub struct HashedPostStateAccountCursor<'b, C> { +#[derive(Debug)] +pub struct HashedPostStateAccountCursor<'a, C> { /// The database cursor. cursor: C, - /// The reference to the in-memory [`HashedPostStateSorted`]. - post_state: &'b HashedPostStateSorted, - /// The post state account index where the cursor is currently at. - post_state_account_index: usize, + /// Forward-only in-memory cursor over accounts. + post_state_cursor: ForwardInMemoryCursor<'a, B256, Account>, + /// Reference to the collection of account keys that were destroyed. + destroyed_accounts: &'a HashSet, /// The last hashed account that was returned by the cursor. /// De facto, this is a current cursor position. 
last_account: Option, } -impl<'b, C> HashedPostStateAccountCursor<'b, C> { +impl<'a, C> HashedPostStateAccountCursor<'a, C> +where + C: HashedCursor, +{ /// Create new instance of [`HashedPostStateAccountCursor`]. - pub const fn new(cursor: C, post_state: &'b HashedPostStateSorted) -> Self { - Self { cursor, post_state, last_account: None, post_state_account_index: 0 } + pub const fn new(cursor: C, post_state_accounts: &'a HashedAccountsSorted) -> Self { + let post_state_cursor = ForwardInMemoryCursor::new(&post_state_accounts.accounts); + let destroyed_accounts = &post_state_accounts.destroyed_accounts; + Self { cursor, post_state_cursor, destroyed_accounts, last_account: None } } /// Returns `true` if the account has been destroyed. @@ -61,34 +71,67 @@ impl<'b, C> HashedPostStateAccountCursor<'b, C> { /// This function only checks the post state, not the database, because the latter does not /// store destroyed accounts. fn is_account_cleared(&self, account: &B256) -> bool { - self.post_state.destroyed_accounts.contains(account) + self.destroyed_accounts.contains(account) + } + + fn seek_inner(&mut self, key: B256) -> Result, DatabaseError> { + // Take the next account from the post state with the key greater than or equal to the + // sought key. + let post_state_entry = self.post_state_cursor.seek(&key); + + // It's an exact match, return the account from post state without looking up in the + // database. + if post_state_entry.map_or(false, |entry| entry.0 == key) { + return Ok(post_state_entry) + } + + // It's not an exact match, reposition to the first greater or equal account that wasn't + // cleared. + let mut db_entry = self.cursor.seek(key)?; + while db_entry.as_ref().map_or(false, |(address, _)| self.is_account_cleared(address)) { + db_entry = self.cursor.next()?; + } + + // Compare two entries and return the lowest. 
+ Ok(Self::compare_entries(post_state_entry, db_entry)) + } + + fn next_inner(&mut self, last_account: B256) -> Result, DatabaseError> { + // Take the next account from the post state with the key greater than the last sought key. + let post_state_entry = self.post_state_cursor.first_after(&last_account); + + // If post state was given precedence or account was cleared, move the cursor forward. + let mut db_entry = self.cursor.seek(last_account)?; + while db_entry.as_ref().map_or(false, |(address, _)| { + address <= &last_account || self.is_account_cleared(address) + }) { + db_entry = self.cursor.next()?; + } + + // Compare two entries and return the lowest. + Ok(Self::compare_entries(post_state_entry, db_entry)) } /// Return the account with the lowest hashed account key. /// /// Given the next post state and database entries, return the smallest of the two. /// If the account keys are the same, the post state entry is given precedence. - fn next_account( - post_state_item: Option<&(B256, Account)>, + fn compare_entries( + post_state_item: Option<(B256, Account)>, db_item: Option<(B256, Account)>, ) -> Option<(B256, Account)> { - match (post_state_item, db_item) { + if let Some((post_state_entry, db_entry)) = post_state_item.zip(db_item) { // If both are not empty, return the smallest of the two // Post state is given precedence if keys are equal - (Some((post_state_address, post_state_account)), Some((db_address, db_account))) => { - if post_state_address <= &db_address { - Some((*post_state_address, *post_state_account)) - } else { - Some((db_address, db_account)) - } - } + Some(if post_state_entry.0 <= db_entry.0 { post_state_entry } else { db_entry }) + } else { // Return either non-empty entry - _ => post_state_item.copied().or(db_item), + db_item.or(post_state_item) } } } -impl<'b, C> HashedCursor for HashedPostStateAccountCursor<'b, C> +impl<'a, C> HashedCursor for HashedPostStateAccountCursor<'a, C> where C: HashedCursor, { @@ -102,41 +145,11 @@ where /// 
/// The returned account key is memoized and the cursor remains positioned at that key until /// [`HashedCursor::seek`] or [`HashedCursor::next`] are called. - fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError> { - self.last_account = None; - - // Take the next account from the post state with the key greater than or equal to the - // sought key. - let mut post_state_entry = self.post_state.accounts.get(self.post_state_account_index); - while post_state_entry.map(|(k, _)| k < &key).unwrap_or_default() { - self.post_state_account_index += 1; - post_state_entry = self.post_state.accounts.get(self.post_state_account_index); - } - - // It's an exact match, return the account from post state without looking up in the - // database. - if let Some((address, account)) = post_state_entry { - if address == &key { - self.last_account = Some(*address); - return Ok(Some((*address, *account))); - } - } - - // It's not an exact match, reposition to the first greater or equal account that wasn't - // cleared. - let mut db_entry = self.cursor.seek(key)?; - while db_entry - .as_ref() - .map(|(address, _)| self.is_account_cleared(address)) - .unwrap_or_default() - { - db_entry = self.cursor.next()?; - } - - // Compare two entries and return the lowest. - let result = Self::next_account(post_state_entry, db_entry); - self.last_account = result.as_ref().map(|(address, _)| *address); - Ok(result) + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + // Find the closes account. + let entry = self.seek_inner(key)?; + self.last_account = entry.as_ref().map(|entry| entry.0); + Ok(entry) } /// Retrieve the next entry from the cursor. @@ -146,208 +159,151 @@ where /// /// NOTE: This function will not return any entry unless [`HashedCursor::seek`] has been /// called. 
- fn next(&mut self) -> Result, reth_db::DatabaseError> { - let last_account = match self.last_account.as_ref() { - Some(account) => account, - None => return Ok(None), // no previous entry was found + fn next(&mut self) -> Result, DatabaseError> { + let next = match self.last_account { + Some(account) => { + let entry = self.next_inner(account)?; + self.last_account = entry.as_ref().map(|entry| entry.0); + entry + } + // no previous entry was found + None => None, }; - - // If post state was given precedence, move the cursor forward. - let mut db_entry = self.cursor.seek(*last_account)?; - while db_entry - .as_ref() - .map(|(address, _)| address <= last_account || self.is_account_cleared(address)) - .unwrap_or_default() - { - db_entry = self.cursor.next()?; - } - - // Take the next account from the post state with the key greater than the last sought key. - let mut post_state_entry = self.post_state.accounts.get(self.post_state_account_index); - while post_state_entry.map(|(k, _)| k <= last_account).unwrap_or_default() { - self.post_state_account_index += 1; - post_state_entry = self.post_state.accounts.get(self.post_state_account_index); - } - - // Compare two entries and return the lowest. - let result = Self::next_account(post_state_entry, db_entry); - self.last_account = result.as_ref().map(|(address, _)| *address); - Ok(result) + Ok(next) } } /// The cursor to iterate over post state hashed storages and corresponding database entries. /// It will always give precedence to the data from the post state. -#[derive(Debug, Clone)] -pub struct HashedPostStateStorageCursor<'b, C> { +#[derive(Debug)] +pub struct HashedPostStateStorageCursor<'a, C> { /// The database cursor. cursor: C, - /// The reference to the post state. - post_state: &'b HashedPostStateSorted, - /// The current hashed account key. - hashed_address: B256, - /// The post state index where the cursor is currently at. 
- post_state_storage_index: usize, + /// Forward-only in-memory cursor over non zero-valued account storage slots. + post_state_cursor: Option>, + /// Reference to the collection of storage slot keys that were cleared. + cleared_slots: Option<&'a HashSet>, + /// Flag indicating whether database storage was wiped. + storage_wiped: bool, /// The last slot that has been returned by the cursor. /// De facto, this is the cursor's position for the given account key. last_slot: Option, } -impl<'b, C> HashedPostStateStorageCursor<'b, C> { +impl<'a, C> HashedPostStateStorageCursor<'a, C> +where + C: HashedStorageCursor, +{ /// Create new instance of [`HashedPostStateStorageCursor`] for the given hashed address. - pub const fn new( - cursor: C, - post_state: &'b HashedPostStateSorted, - hashed_address: B256, - ) -> Self { - Self { cursor, post_state, hashed_address, last_slot: None, post_state_storage_index: 0 } - } - - /// Returns `true` if the storage for the given - /// The database is not checked since it already has no wiped storage entries. - fn is_db_storage_wiped(&self) -> bool { - match self.post_state.storages.get(&self.hashed_address) { - Some(storage) => storage.wiped, - None => false, - } + pub fn new(cursor: C, post_state_storage: Option<&'a HashedStorageSorted>) -> Self { + let post_state_cursor = + post_state_storage.map(|s| ForwardInMemoryCursor::new(&s.non_zero_valued_slots)); + let cleared_slots = post_state_storage.map(|s| &s.zero_valued_slots); + let storage_wiped = post_state_storage.map_or(false, |s| s.wiped); + Self { cursor, post_state_cursor, cleared_slots, storage_wiped, last_slot: None } } /// Check if the slot was zeroed out in the post state. /// The database is not checked since it already has no zero-valued slots. 
fn is_slot_zero_valued(&self, slot: &B256) -> bool { - self.post_state - .storages - .get(&self.hashed_address) - .map(|storage| storage.zero_valued_slots.contains(slot)) - .unwrap_or_default() + self.cleared_slots.map_or(false, |s| s.contains(slot)) + } + + /// Find the storage entry in post state or database that's greater or equal to provided subkey. + fn seek_inner(&mut self, subkey: B256) -> Result, DatabaseError> { + // Attempt to find the account's storage in post state. + let post_state_entry = self.post_state_cursor.as_mut().and_then(|c| c.seek(&subkey)); + + // If database storage was wiped or it's an exact match, + // return the storage slot from post state without looking up in the database. + if self.storage_wiped || post_state_entry.map_or(false, |entry| entry.0 == subkey) { + return Ok(post_state_entry) + } + + // It's not an exact match and storage was not wiped, + // reposition to the first greater or equal account. + let mut db_entry = self.cursor.seek(subkey)?; + while db_entry.as_ref().map_or(false, |entry| self.is_slot_zero_valued(&entry.0)) { + db_entry = self.cursor.next()?; + } + + // Compare two entries and return the lowest. + Ok(Self::compare_entries(post_state_entry, db_entry)) + } + + /// Find the storage entry that is right after current cursor position. + fn next_inner(&mut self, last_slot: B256) -> Result, DatabaseError> { + // Attempt to find the account's storage in post state. + let post_state_entry = + self.post_state_cursor.as_mut().and_then(|c| c.first_after(&last_slot)); + + // Return post state entry immediately if database was wiped. + if self.storage_wiped { + return Ok(post_state_entry) + } + + // If post state was given precedence, move the cursor forward. + // If the entry was already returned or is zero-valued, move to the next. 
+ let mut db_entry = self.cursor.seek(last_slot)?; + while db_entry + .as_ref() + .map_or(false, |entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) + { + db_entry = self.cursor.next()?; + } + + // Compare two entries and return the lowest. + Ok(Self::compare_entries(post_state_entry, db_entry)) } /// Return the storage entry with the lowest hashed storage key (hashed slot). /// /// Given the next post state and database entries, return the smallest of the two. /// If the storage keys are the same, the post state entry is given precedence. - fn next_slot( - post_state_item: Option<&(B256, U256)>, + fn compare_entries( + post_state_item: Option<(B256, U256)>, db_item: Option<(B256, U256)>, ) -> Option<(B256, U256)> { - match (post_state_item, db_item) { + if let Some((post_state_entry, db_entry)) = post_state_item.zip(db_item) { // If both are not empty, return the smallest of the two // Post state is given precedence if keys are equal - (Some((post_state_slot, post_state_value)), Some((db_slot, db_value))) => { - if post_state_slot <= &db_slot { - Some((*post_state_slot, *post_state_value)) - } else { - Some((db_slot, db_value)) - } - } + Some(if post_state_entry.0 <= db_entry.0 { post_state_entry } else { db_entry }) + } else { // Return either non-empty entry - _ => db_item.or_else(|| post_state_item.copied()), + db_item.or(post_state_item) } } } -impl<'b, C> HashedCursor for HashedPostStateStorageCursor<'b, C> +impl<'a, C> HashedCursor for HashedPostStateStorageCursor<'a, C> where C: HashedStorageCursor, { type Value = U256; /// Seek the next account storage entry for a given hashed key pair. - fn seek( - &mut self, - subkey: B256, - ) -> Result, reth_db::DatabaseError> { - // Attempt to find the account's storage in post state. 
- let mut post_state_entry = None; - if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { - post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); - - while post_state_entry.map(|(slot, _)| slot < &subkey).unwrap_or_default() { - self.post_state_storage_index += 1; - post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); - } - } - - // It's an exact match, return the storage slot from post state without looking up in - // the database. - if let Some((slot, value)) = post_state_entry { - if slot == &subkey { - self.last_slot = Some(*slot); - return Ok(Some((*slot, *value))); - } - } - - // It's not an exact match, reposition to the first greater or equal account. - let db_entry = if self.is_db_storage_wiped() { - None - } else { - let mut db_entry = self.cursor.seek(subkey)?; - - while db_entry - .as_ref() - .map(|entry| self.is_slot_zero_valued(&entry.0)) - .unwrap_or_default() - { - db_entry = self.cursor.next()?; - } - - db_entry - }; - - // Compare two entries and return the lowest. - let result = Self::next_slot(post_state_entry, db_entry); - self.last_slot = result.as_ref().map(|entry| entry.0); - Ok(result) + fn seek(&mut self, subkey: B256) -> Result, DatabaseError> { + let entry = self.seek_inner(subkey)?; + self.last_slot = entry.as_ref().map(|entry| entry.0); + Ok(entry) } /// Return the next account storage entry for the current account key. - /// - /// # Panics - /// - /// If the account key is not set. [`HashedCursor::seek`] must be called first in order to - /// position the cursor. - fn next(&mut self) -> Result, reth_db::DatabaseError> { - let last_slot = match self.last_slot.as_ref() { - Some(slot) => slot, - None => return Ok(None), // no previous entry was found - }; - - let db_entry = if self.is_db_storage_wiped() { - None - } else { - // If post state was given precedence, move the cursor forward. 
- let mut db_entry = self.cursor.seek(*last_slot)?; - - // If the entry was already returned or is zero-values, move to the next. - while db_entry - .as_ref() - .map(|entry| &entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) - .unwrap_or_default() - { - db_entry = self.cursor.next()?; + fn next(&mut self) -> Result, DatabaseError> { + let next = match self.last_slot { + Some(last_slot) => { + let entry = self.next_inner(last_slot)?; + self.last_slot = entry.as_ref().map(|entry| entry.0); + entry } - - db_entry + // no previous entry was found + None => None, }; - - // Attempt to find the account's storage in post state. - let mut post_state_entry = None; - if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { - post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); - while post_state_entry.map(|(slot, _)| slot <= last_slot).unwrap_or_default() { - self.post_state_storage_index += 1; - post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); - } - } - - // Compare two entries and return the lowest. - let result = Self::next_slot(post_state_entry, db_entry); - self.last_slot = result.as_ref().map(|entry| entry.0); - Ok(result) + Ok(next) } } -impl<'b, C> HashedStorageCursor for HashedPostStateStorageCursor<'b, C> +impl<'a, C> HashedStorageCursor for HashedPostStateStorageCursor<'a, C> where C: HashedStorageCursor, { @@ -355,13 +311,13 @@ where /// /// This function should be called before attempting to call [`HashedCursor::seek`] or /// [`HashedCursor::next`]. 
- fn is_storage_empty(&mut self) -> Result { - let is_empty = match self.post_state.storages.get(&self.hashed_address) { - Some(storage) => { + fn is_storage_empty(&mut self) -> Result { + let is_empty = match &self.post_state_cursor { + Some(cursor) => { // If the storage has been wiped at any point - storage.wiped && + self.storage_wiped && // and the current storage does not contain any non-zero values - storage.non_zero_valued_slots.is_empty() + cursor.is_empty() } None => self.cursor.is_storage_empty()?, }; @@ -374,6 +330,7 @@ mod tests { use super::*; use crate::{HashedPostState, HashedStorage}; use proptest::prelude::*; + use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::create_test_rw_db}; use reth_db_api::{database::Database, transaction::DbTxMut}; use reth_primitives::StorageEntry; @@ -537,7 +494,7 @@ mod tests { #[test] fn fuzz_hashed_account_cursor() { - proptest!(ProptestConfig::with_cases(10), |(db_accounts: BTreeMap, post_state_accounts: BTreeMap>)| { + proptest!(ProptestConfig::with_cases(10), |(db_accounts in arb::>(), post_state_accounts in arb::>>())| { let db = create_test_rw_db(); db.update(|tx| { for (key, account) in &db_accounts { diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 16251dc11903..07af0775705a 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -17,6 +17,9 @@ /// The container indicates when the trie has been modified. pub mod prefix_set; +/// The implementation of forward-only in-memory cursor. +pub mod forward_cursor; + /// The cursor implementations for navigating account and storage tries. pub mod trie_cursor; @@ -51,7 +54,7 @@ pub use progress::{IntermediateStateRootState, StateRootProgress}; pub mod stats; // re-export for convenience -pub use reth_trie_types::*; +pub use reth_trie_common::*; /// Trie calculation metrics. 
#[cfg(feature = "metrics")] diff --git a/crates/trie/trie/src/prefix_set/mod.rs b/crates/trie/trie/src/prefix_set/mod.rs index 79aaae0c90a7..657695f22985 100644 --- a/crates/trie/trie/src/prefix_set/mod.rs +++ b/crates/trie/trie/src/prefix_set/mod.rs @@ -8,6 +8,35 @@ use std::{ mod loader; pub use loader::PrefixSetLoader; +/// Collection of mutable prefix sets. +#[derive(Default, Debug)] +pub struct TriePrefixSetsMut { + /// A set of account prefixes that have changed. + pub account_prefix_set: PrefixSetMut, + /// A map containing storage changes with the hashed address as key and a set of storage key + /// prefixes as the value. + pub storage_prefix_sets: HashMap, + /// A set of hashed addresses of destroyed accounts. + pub destroyed_accounts: HashSet, +} + +impl TriePrefixSetsMut { + /// Returns a `TriePrefixSets` with the same elements as these sets. + /// + /// If not yet sorted, the elements will be sorted and deduplicated. + pub fn freeze(self) -> TriePrefixSets { + TriePrefixSets { + account_prefix_set: self.account_prefix_set.freeze(), + storage_prefix_sets: self + .storage_prefix_sets + .into_iter() + .map(|(hashed_address, prefix_set)| (hashed_address, prefix_set.freeze())) + .collect(), + destroyed_accounts: self.destroyed_accounts, + } + } +} + /// Collection of trie prefix sets. #[derive(Default, Debug)] pub struct TriePrefixSets { @@ -102,6 +131,15 @@ impl PrefixSetMut { self.keys.push(nibbles); } + /// Extend prefix set keys with contents of provided iterator. + pub fn extend(&mut self, nibbles_iter: I) + where + I: IntoIterator, + { + self.sorted = false; + self.keys.extend(nibbles_iter); + } + /// Returns the number of elements in the set. 
pub fn len(&self) -> usize { self.keys.len() @@ -177,6 +215,14 @@ impl PrefixSet { } } +impl<'a> IntoIterator for &'a PrefixSet { + type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; + type Item = &'a reth_trie_common::Nibbles; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index b8786ef1f540..5c6c3b211b84 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -1,7 +1,7 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, - prefix_set::PrefixSetMut, + prefix_set::TriePrefixSetsMut, trie_cursor::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}, walker::TrieWalker, HashBuilder, Nibbles, @@ -10,13 +10,9 @@ use alloy_rlp::{BufMut, Encodable}; use reth_db::tables; use reth_db_api::transaction::DbTx; use reth_execution_errors::{StateRootError, StorageRootError}; -use reth_primitives::{ - constants::EMPTY_ROOT_HASH, - keccak256, - proofs::{AccountProof, IntoTrieAccount, StorageProof}, - Address, B256, -}; -use reth_trie_types::proof::ProofRetainer; +use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; +use reth_trie_common::{proof::ProofRetainer, AccountProof, StorageProof, TrieAccount}; + /// A struct for generating merkle proofs. /// /// Proof generator adds the target address and slots to the prefix set, enables the proof retainer @@ -28,12 +24,32 @@ pub struct Proof<'a, TX, H> { tx: &'a TX, /// The factory for hashed cursors. hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: TriePrefixSetsMut, +} + +impl<'a, TX, H> Proof<'a, TX, H> { + /// Creates a new proof generator. + pub fn new(tx: &'a TX, hashed_cursor_factory: H) -> Self { + Self { tx, hashed_cursor_factory, prefix_sets: TriePrefixSetsMut::default() } + } + + /// Set the hashed cursor factory. 
+ pub fn with_hashed_cursor_factory(self, hashed_cursor_factory: HF) -> Proof<'a, TX, HF> { + Proof { tx: self.tx, hashed_cursor_factory, prefix_sets: self.prefix_sets } + } + + /// Set the prefix sets. They have to be mutable in order to allow extension with proof target. + pub fn with_prefix_sets_mut(mut self, prefix_sets: TriePrefixSetsMut) -> Self { + self.prefix_sets = prefix_sets; + self + } } impl<'a, TX> Proof<'a, TX, &'a TX> { - /// Create a new [Proof] instance. - pub const fn new(tx: &'a TX) -> Self { - Self { tx, hashed_cursor_factory: tx } + /// Create a new [Proof] instance from database transaction. + pub fn from_tx(tx: &'a TX) -> Self { + Self::new(tx, tx) } } @@ -57,7 +73,7 @@ where DatabaseAccountTrieCursor::new(self.tx.cursor_read::()?); // Create the walker. - let mut prefix_set = PrefixSetMut::default(); + let mut prefix_set = self.prefix_sets.account_prefix_set.clone(); prefix_set.insert(target_nibbles.clone()); let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); @@ -83,7 +99,7 @@ where }; account_rlp.clear(); - let account = IntoTrieAccount::to_trie_account((account, storage_root)); + let account = TrieAccount::from((account, storage_root)); account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); @@ -122,12 +138,14 @@ where } let target_nibbles = proofs.iter().map(|p| p.nibbles.clone()).collect::>(); - let prefix_set = PrefixSetMut::from(target_nibbles.clone()).freeze(); + let mut prefix_set = + self.prefix_sets.storage_prefix_sets.get(&hashed_address).cloned().unwrap_or_default(); + prefix_set.extend(target_nibbles.clone()); let trie_cursor = DatabaseStorageTrieCursor::new( self.tx.cursor_dup_read::()?, hashed_address, ); - let walker = TrieWalker::new(trie_cursor, prefix_set); + let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); let retainer = ProofRetainer::from_iter(target_nibbles); let mut hash_builder = 
HashBuilder::default().with_proof_retainer(retainer); @@ -169,8 +187,9 @@ mod tests { use super::*; use crate::StateRoot; use once_cell::sync::Lazy; + use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; use reth_db_api::database::Database; - use reth_primitives::{Account, Bytes, Chain, ChainSpec, StorageEntry, HOLESKY, MAINNET, U256}; + use reth_primitives::{Account, Bytes, StorageEntry, U256}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter, ProviderFactory}; use reth_storage_errors::provider::ProviderResult; use std::{str::FromStr, sync::Arc}; @@ -230,7 +249,7 @@ mod tests { let (root, updates) = StateRoot::from_tx(provider.tx_ref()) .root_with_updates() .map_err(Into::::into)?; - updates.flush(provider.tx_mut())?; + updates.write_to_database(provider.tx_mut())?; provider.commit()?; @@ -285,7 +304,8 @@ mod tests { let provider = factory.provider().unwrap(); for (target, expected_proof) in data { let target = Address::from_str(target).unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!( account_proof.proof, expected_proof, @@ -305,7 +325,8 @@ mod tests { let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); let provider = factory.provider().unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); assert_eq!(slots.len(), account_proof.storage_proofs.len()); @@ -337,7 +358,7 @@ mod tests { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); 
similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -360,7 +381,7 @@ mod tests { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -446,7 +467,8 @@ mod tests { }; let provider = factory.provider().unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); similar_asserts::assert_eq!(account_proof, expected); assert_eq!(account_proof.verify(root), Ok(())); } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 13f91a697dcb..c6c93c0b3620 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -1,6 +1,7 @@ use crate::{ hashed_cursor::HashedPostStateCursorFactory, - prefix_set::{PrefixSetMut, TriePrefixSets}, + prefix_set::{PrefixSetMut, TriePrefixSetsMut}, + proof::Proof, updates::TrieUpdates, Nibbles, StateRoot, }; @@ -12,9 +13,8 @@ use reth_db_api::{ transaction::DbTx, }; use reth_execution_errors::StateRootError; -use reth_primitives::{ - keccak256, revm::compat::into_reth_acc, Account, Address, BlockNumber, B256, U256, -}; +use reth_primitives::{keccak256, Account, Address, BlockNumber, B256, U256}; +use reth_trie_common::AccountProof; use revm::db::BundleAccount; use std::{ collections::{hash_map, HashMap, HashSet}, @@ -41,7 +41,7 @@ impl HashedPostState { .into_par_iter() .map(|(address, account)| { let hashed_address = keccak256(address); - let hashed_account = account.info.clone().map(into_reth_acc); + let hashed_account = account.info.clone().map(Into::into); let hashed_storage = 
HashedStorage::from_iter( account.status.was_destroyed(), account.storage.iter().map(|(key, value)| { @@ -151,16 +151,17 @@ impl HashedPostState { /// Converts hashed post state into [`HashedPostStateSorted`]. pub fn into_sorted(self) -> HashedPostStateSorted { - let mut accounts = Vec::new(); + let mut updated_accounts = Vec::new(); let mut destroyed_accounts = HashSet::default(); for (hashed_address, info) in self.accounts { if let Some(info) = info { - accounts.push((hashed_address, info)); + updated_accounts.push((hashed_address, info)); } else { destroyed_accounts.insert(hashed_address); } } - accounts.sort_unstable_by_key(|(address, _)| *address); + updated_accounts.sort_unstable_by_key(|(address, _)| *address); + let accounts = HashedAccountsSorted { accounts: updated_accounts, destroyed_accounts }; let storages = self .storages @@ -168,13 +169,13 @@ impl HashedPostState { .map(|(hashed_address, storage)| (hashed_address, storage.into_sorted())) .collect(); - HashedPostStateSorted { accounts, destroyed_accounts, storages } + HashedPostStateSorted { accounts, storages } } - /// Construct [`TriePrefixSets`] from hashed post state. + /// Construct [`TriePrefixSetsMut`] from hashed post state. /// The prefix sets contain the hashed account and storage keys that have been changed in the /// post state. - pub fn construct_prefix_sets(&self) -> TriePrefixSets { + pub fn construct_prefix_sets(&self) -> TriePrefixSetsMut { // Populate account prefix set. 
let mut account_prefix_set = PrefixSetMut::with_capacity(self.accounts.len()); let mut destroyed_accounts = HashSet::default(); @@ -195,14 +196,10 @@ impl HashedPostState { for hashed_slot in hashed_storage.storage.keys() { prefix_set.insert(Nibbles::unpack(hashed_slot)); } - storage_prefix_sets.insert(*hashed_address, prefix_set.freeze()); + storage_prefix_sets.insert(*hashed_address, prefix_set); } - TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - } + TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts } } /// Calculate the state root for this [`HashedPostState`]. @@ -237,7 +234,7 @@ impl HashedPostState { /// The state root for this [`HashedPostState`]. pub fn state_root(&self, tx: &TX) -> Result { let sorted = self.clone().into_sorted(); - let prefix_sets = self.construct_prefix_sets(); + let prefix_sets = self.construct_prefix_sets().freeze(); StateRoot::from_tx(tx) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) .with_prefix_sets(prefix_sets) @@ -251,12 +248,27 @@ impl HashedPostState { tx: &TX, ) -> Result<(B256, TrieUpdates), StateRootError> { let sorted = self.clone().into_sorted(); - let prefix_sets = self.construct_prefix_sets(); + let prefix_sets = self.construct_prefix_sets().freeze(); StateRoot::from_tx(tx) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) .with_prefix_sets(prefix_sets) .root_with_updates() } + + /// Generates the state proof for target account and slots on top of this [`HashedPostState`]. 
+ pub fn account_proof( + &self, + tx: &TX, + address: Address, + slots: &[B256], + ) -> Result { + let sorted = self.clone().into_sorted(); + let prefix_sets = self.construct_prefix_sets(); + Proof::from_tx(tx) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) + .with_prefix_sets_mut(prefix_sets) + .account_proof(address, slots) + } } /// Representation of in-memory hashed storage. @@ -311,12 +323,19 @@ impl HashedStorage { /// Sorted hashed post state optimized for iterating during state trie calculation. #[derive(PartialEq, Eq, Clone, Debug)] pub struct HashedPostStateSorted { + /// Updated state of accounts. + pub(crate) accounts: HashedAccountsSorted, + /// Map of hashed addresses to hashed storage. + pub(crate) storages: HashMap, +} + +/// Sorted account state optimized for iterating during state trie calculation. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct HashedAccountsSorted { /// Sorted collection of hashed addresses and their account info. pub(crate) accounts: Vec<(B256, Account)>, /// Set of destroyed account keys. pub(crate) destroyed_accounts: HashSet, - /// Map of hashed addresses to hashed storage. - pub(crate) storages: HashMap, } /// Sorted hashed storage optimized for iterating during state trie calculation. diff --git a/crates/trie/trie/src/test_utils.rs b/crates/trie/trie/src/test_utils.rs index bd0c1936fbf7..e2fc1f192c32 100644 --- a/crates/trie/trie/src/test_utils.rs +++ b/crates/trie/trie/src/test_utils.rs @@ -1,8 +1,6 @@ use alloy_rlp::encode_fixed_size; -use reth_primitives::{ - proofs::{triehash::KeccakHasher, IntoTrieAccount}, - Account, Address, B256, U256, -}; +use reth_primitives::{Account, Address, B256, U256}; +use reth_trie_common::{triehash::KeccakHasher, TrieAccount}; /// Re-export of [triehash]. 
pub use triehash; @@ -15,7 +13,7 @@ where { let encoded_accounts = accounts.into_iter().map(|(address, (account, storage))| { let storage_root = storage_root(storage); - let account = IntoTrieAccount::to_trie_account((account, storage_root)); + let account = TrieAccount::from((account, storage_root)); (address, alloy_rlp::encode(account)) }); triehash::sec_trie_root::(encoded_accounts) @@ -36,7 +34,7 @@ where { let encoded_accounts = accounts.into_iter().map(|(address, (account, storage))| { let storage_root = storage_root_prehashed(storage); - let account = IntoTrieAccount::to_trie_account((account, storage_root)); + let account = TrieAccount::from((account, storage_root)); (address, alloy_rlp::encode(account)) }); diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index f21189e25ffa..6fe1bfd998b0 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -5,16 +5,14 @@ use crate::{ progress::{IntermediateStateRootState, StateRootProgress}, stats::TrieTracker, trie_cursor::TrieCursorFactory, - updates::{TrieKey, TrieOp, TrieUpdates}, + updates::{StorageTrieUpdates, TrieUpdates}, walker::TrieWalker, - HashBuilder, Nibbles, + HashBuilder, Nibbles, TrieAccount, }; use alloy_rlp::{BufMut, Encodable}; use reth_db_api::transaction::DbTx; use reth_execution_errors::{StateRootError, StorageRootError}; -use reth_primitives::{ - constants::EMPTY_ROOT_HASH, keccak256, proofs::IntoTrieAccount, Address, BlockNumber, B256, -}; +use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, BlockNumber, B256}; use std::ops::RangeInclusive; use tracing::{debug, trace}; @@ -223,7 +221,7 @@ where state.walker_stack, self.prefix_sets.account_prefix_set, ) - .with_updates(retain_updates); + .with_deletions_retained(retain_updates); let node_iter = TrieNodeIter::new(walker, hashed_account_cursor) .with_last_hashed_key(state.last_account_key); (hash_builder, node_iter) @@ -231,7 +229,7 @@ where None => { let hash_builder = 
HashBuilder::default().with_updates(retain_updates); let walker = TrieWalker::new(trie_cursor, self.prefix_sets.account_prefix_set) - .with_updates(retain_updates); + .with_deletions_retained(retain_updates); let node_iter = TrieNodeIter::new(walker, hashed_account_cursor); (hash_builder, node_iter) } @@ -239,6 +237,7 @@ where let mut account_rlp = Vec::with_capacity(128); let mut hashed_entries_walked = 0; + let mut updated_storage_nodes = 0; while let Some(node) = account_node_iter.try_next()? { match node { TrieElement::Branch(node) => { @@ -275,24 +274,28 @@ where let (root, storage_slots_walked, updates) = storage_root_calculator.root_with_updates()?; hashed_entries_walked += storage_slots_walked; - trie_updates.extend(updates); + // We only walk over hashed address once, so it's safe to insert. + updated_storage_nodes += updates.len(); + trie_updates.insert_storage_updates(hashed_address, updates); root } else { storage_root_calculator.root()? }; account_rlp.clear(); - let account = IntoTrieAccount::to_trie_account((account, storage_root)); + let account = TrieAccount::from((account, storage_root)); account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); // Decide if we need to return intermediate progress. 
- let total_updates_len = trie_updates.len() + - account_node_iter.walker.updates_len() + + let total_updates_len = updated_storage_nodes + + account_node_iter.walker.removed_keys_len() + hash_builder.updates_len(); if retain_updates && total_updates_len as u64 >= self.threshold { - let (walker_stack, walker_updates) = account_node_iter.walker.split(); + let (walker_stack, walker_deleted_keys) = account_node_iter.walker.split(); + trie_updates.removed_nodes.extend(walker_deleted_keys); let (hash_builder, hash_builder_updates) = hash_builder.split(); + trie_updates.account_nodes.extend(hash_builder_updates); let state = IntermediateStateRootState { hash_builder, @@ -300,9 +303,6 @@ where last_account_key: hashed_address, }; - trie_updates.extend(walker_updates); - trie_updates.extend_with_account_updates(hash_builder_updates); - return Ok(StateRootProgress::Progress( Box::new(state), hashed_entries_walked, @@ -315,7 +315,7 @@ where let root = hash_builder.root(); - trie_updates.finalize_state_updates( + trie_updates.finalize( account_node_iter.walker, hash_builder, self.prefix_sets.destroyed_accounts, @@ -454,7 +454,7 @@ where /// # Returns /// /// The storage root and storage trie updates for a given address. - pub fn root_with_updates(self) -> Result<(B256, usize, TrieUpdates), StorageRootError> { + pub fn root_with_updates(self) -> Result<(B256, usize, StorageTrieUpdates), StorageRootError> { self.calculate(true) } @@ -477,7 +477,7 @@ where pub fn calculate( self, retain_updates: bool, - ) -> Result<(B256, usize, TrieUpdates), StorageRootError> { + ) -> Result<(B256, usize, StorageTrieUpdates), StorageRootError> { trace!(target: "trie::storage_root", hashed_address = ?self.hashed_address, "calculating storage root"); let mut hashed_storage_cursor = @@ -485,16 +485,13 @@ where // short circuit on empty storage if hashed_storage_cursor.is_storage_empty()? 
{ - return Ok(( - EMPTY_ROOT_HASH, - 0, - TrieUpdates::from([(TrieKey::StorageTrie(self.hashed_address), TrieOp::Delete)]), - )); + return Ok((EMPTY_ROOT_HASH, 0, StorageTrieUpdates::deleted())) } let mut tracker = TrieTracker::default(); - let trie_cursor = self.trie_cursor_factory.storage_tries_cursor(self.hashed_address)?; - let walker = TrieWalker::new(trie_cursor, self.prefix_set).with_updates(retain_updates); + let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; + let walker = + TrieWalker::new(trie_cursor, self.prefix_set).with_deletions_retained(retain_updates); let mut hash_builder = HashBuilder::default().with_updates(retain_updates); @@ -517,12 +514,8 @@ where let root = hash_builder.root(); - let mut trie_updates = TrieUpdates::default(); - trie_updates.finalize_storage_updates( - self.hashed_address, - storage_node_iter.walker, - hash_builder, - ); + let mut trie_updates = StorageTrieUpdates::default(); + trie_updates.finalize(storage_node_iter.walker, hash_builder); let stats = tracker.finish(); @@ -553,15 +546,15 @@ mod tests { BranchNodeCompact, TrieMask, }; use proptest::{prelude::ProptestConfig, proptest}; + use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, transaction::DbTxMut, }; - use reth_primitives::{ - hex_literal::hex, proofs::triehash::KeccakHasher, Account, StorageEntry, U256, - }; + use reth_primitives::{hex_literal::hex, Account, StorageEntry, U256}; use reth_provider::{test_utils::create_test_provider_factory, DatabaseProviderRW}; + use reth_trie_common::triehash::KeccakHasher; use std::{ collections::{BTreeMap, HashMap}, ops::Mul, @@ -623,7 +616,7 @@ mod tests { let modified_root = loader.root().unwrap(); // Update the intermediate roots table so that we can run the incremental verification - trie_updates.flush(tx.tx_ref()).unwrap(); + trie_updates.write_to_database(tx.tx_ref(), 
hashed_address).unwrap(); // 3. Calculate the incremental root let mut storage_changes = PrefixSetMut::default(); @@ -652,7 +645,7 @@ mod tests { #[test] fn arbitrary_storage_root() { - proptest!(ProptestConfig::with_cases(10), |(item: (Address, std::collections::BTreeMap))| { + proptest!(ProptestConfig::with_cases(10), |(item in arb::<(Address, std::collections::BTreeMap)>())| { let (address, storage) = item; let hashed_address = keccak256(address); @@ -762,7 +755,7 @@ mod tests { #[test] fn arbitrary_state_root() { proptest!( - ProptestConfig::with_cases(10), | (state: State) | { + ProptestConfig::with_cases(10), | (state in arb::()) | { test_state_root_with_state(state); } ); @@ -771,7 +764,7 @@ mod tests { #[test] fn arbitrary_state_root_with_progress() { proptest!( - ProptestConfig::with_cases(10), | (state: State) | { + ProptestConfig::with_cases(10), | (state in arb::()) | { let hashed_entries_total = state.len() + state.values().map(|(_, slots)| slots.len()).sum::(); @@ -828,8 +821,7 @@ mod tests { } fn encode_account(account: Account, storage_root: Option) -> Vec { - let account = - IntoTrieAccount::to_trie_account((account, storage_root.unwrap_or(EMPTY_ROOT_HASH))); + let account = TrieAccount::from((account, storage_root.unwrap_or(EMPTY_ROOT_HASH))); let mut account_rlp = Vec::with_capacity(account.length()); account.encode(&mut account_rlp); account_rlp @@ -983,14 +975,7 @@ mod tests { assert_eq!(root, computed_expected_root); // Check account trie - let mut account_updates = trie_updates - .iter() - .filter_map(|(k, v)| match (k, v) { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - account_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); + let account_updates = trie_updates.clone().into_sorted().account_nodes; assert_eq!(account_updates.len(), 2); let (nibbles1a, node1a) = account_updates.first().unwrap(); @@ -1010,16 +995,13 @@ mod tests { assert_eq!(node2a.hashes.len(), 1); // Check 
storage trie - let storage_updates = trie_updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::StorageNode(_, nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - assert_eq!(storage_updates.len(), 1); + let mut updated_storage_trie = + trie_updates.storage_tries.iter().filter(|(_, u)| !u.storage_nodes.is_empty()); + assert_eq!(updated_storage_trie.clone().count(), 1); + let (_, storage_trie_updates) = updated_storage_trie.next().unwrap(); + assert_eq!(storage_trie_updates.storage_nodes.len(), 1); - let (nibbles3, node3) = storage_updates.first().unwrap(); + let (nibbles3, node3) = storage_trie_updates.storage_nodes.iter().next().unwrap(); assert!(nibbles3.is_empty()); assert_eq!(node3.state_mask, TrieMask::new(0b1010)); assert_eq!(node3.tree_mask, TrieMask::new(0b0000)); @@ -1053,14 +1035,7 @@ mod tests { .unwrap(); assert_eq!(root, expected_state_root); - let mut account_updates = trie_updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - account_updates.sort_by(|a, b| a.0.cmp(b.0)); + let account_updates = trie_updates.into_sorted().account_nodes; assert_eq!(account_updates.len(), 2); let (nibbles1b, node1b) = account_updates.first().unwrap(); @@ -1107,19 +1082,11 @@ mod tests { .root_with_updates() .unwrap(); assert_eq!(root, computed_expected_root); - assert_eq!(trie_updates.len(), 7); - assert_eq!(trie_updates.iter().filter(|(_, op)| op.is_update()).count(), 2); + assert_eq!(trie_updates.account_nodes.len() + trie_updates.removed_nodes.len(), 1); - let account_updates = trie_updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - assert_eq!(account_updates.len(), 1); + assert_eq!(trie_updates.account_nodes.len(), 1); - let (nibbles1c, node1c) = 
account_updates.first().unwrap(); + let (nibbles1c, node1c) = trie_updates.account_nodes.iter().next().unwrap(); assert_eq!(nibbles1c[..], [0xB]); assert_eq!(node1c.state_mask, TrieMask::new(0b1011)); @@ -1166,19 +1133,15 @@ mod tests { .root_with_updates() .unwrap(); assert_eq!(root, computed_expected_root); - assert_eq!(trie_updates.len(), 6); - assert_eq!(trie_updates.iter().filter(|(_, op)| op.is_update()).count(), 1); // no storage root update - - let account_updates = trie_updates + assert_eq!(trie_updates.account_nodes.len() + trie_updates.removed_nodes.len(), 1); + assert!(!trie_updates + .storage_tries .iter() - .filter_map(|entry| match entry { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - assert_eq!(account_updates.len(), 1); + .any(|(_, u)| !u.storage_nodes.is_empty() || !u.removed_nodes.is_empty())); // no storage root update - let (nibbles1d, node1d) = account_updates.first().unwrap(); + assert_eq!(trie_updates.account_nodes.len(), 1); + + let (nibbles1d, node1d) = trie_updates.account_nodes.iter().next().unwrap(); assert_eq!(nibbles1d[..], [0xB]); assert_eq!(node1d.state_mask, TrieMask::new(0b1011)); @@ -1202,19 +1165,7 @@ mod tests { let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); assert_eq!(expected, got); - - // Check account trie - let account_updates = updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => { - Some((nibbles.0.clone(), node.clone())) - } - _ => None, - }) - .collect::>(); - - assert_trie_updates(&account_updates); + assert_trie_updates(&updates.account_nodes); } #[test] @@ -1226,7 +1177,7 @@ mod tests { let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); assert_eq!(expected, got); - updates.flush(tx.tx_ref()).unwrap(); + updates.write_to_database(tx.tx_ref()).unwrap(); // read the account updates from the db let mut accounts_trie = 
tx.tx_ref().cursor_read::().unwrap(); @@ -1273,7 +1224,7 @@ mod tests { state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) ); assert_eq!(expected_root, state_root); - trie_updates.flush(tx.tx_ref()).unwrap(); + trie_updates.write_to_database(tx.tx_ref()).unwrap(); } } } @@ -1289,26 +1240,14 @@ mod tests { let (got, _, updates) = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap(); assert_eq!(expected_root, got); - - // Check account trie - let storage_updates = updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::StorageNode(_, nibbles), TrieOp::Update(node)) => { - Some((nibbles.0.clone(), node.clone())) - } - _ => None, - }) - .collect::>(); - assert_eq!(expected_updates, storage_updates); - - assert_trie_updates(&storage_updates); + assert_eq!(expected_updates, updates); + assert_trie_updates(&updates.storage_nodes); } fn extension_node_storage_trie( tx: &DatabaseProviderRW>>, hashed_address: B256, - ) -> (B256, HashMap) { + ) -> (B256, StorageTrieUpdates) { let value = U256::from(1); let mut hashed_storage = tx.tx_ref().cursor_write::().unwrap(); @@ -1331,7 +1270,8 @@ mod tests { let root = hb.root(); let (_, updates) = hb.split(); - (root, updates) + let trie_updates = StorageTrieUpdates { storage_nodes: updates, ..Default::default() }; + (root, trie_updates) } fn extension_node_trie(tx: &DatabaseProviderRW>>) -> B256 { diff --git a/crates/trie/trie/src/trie_cursor/database_cursors.rs b/crates/trie/trie/src/trie_cursor/database_cursors.rs index 910ae61b4648..53a64a0b09f1 100644 --- a/crates/trie/trie/src/trie_cursor/database_cursors.rs +++ b/crates/trie/trie/src/trie_cursor/database_cursors.rs @@ -1,5 +1,5 @@ use super::{TrieCursor, TrieCursorFactory}; -use crate::{updates::TrieKey, BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey}; +use crate::{BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey}; use reth_db::{tables, 
DatabaseError}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -9,18 +9,22 @@ use reth_primitives::B256; /// Implementation of the trie cursor factory for a database transaction. impl<'a, TX: DbTx> TrieCursorFactory for &'a TX { - fn account_trie_cursor(&self) -> Result, DatabaseError> { - Ok(Box::new(DatabaseAccountTrieCursor::new(self.cursor_read::()?))) + type AccountTrieCursor = DatabaseAccountTrieCursor<::Cursor>; + type StorageTrieCursor = + DatabaseStorageTrieCursor<::DupCursor>; + + fn account_trie_cursor(&self) -> Result { + Ok(DatabaseAccountTrieCursor::new(self.cursor_read::()?)) } - fn storage_tries_cursor( + fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result, DatabaseError> { - Ok(Box::new(DatabaseStorageTrieCursor::new( + ) -> Result { + Ok(DatabaseStorageTrieCursor::new( self.cursor_dup_read::()?, hashed_address, - ))) + )) } } @@ -56,8 +60,8 @@ where } /// Retrieves the current key in the cursor. - fn current(&mut self) -> Result, DatabaseError> { - Ok(self.0.current()?.map(|(k, _)| TrieKey::AccountNode(k))) + fn current(&mut self) -> Result, DatabaseError> { + Ok(self.0.current()?.map(|(k, _)| k.0)) } } @@ -105,8 +109,8 @@ where } /// Retrieves the current value in the storage trie cursor. 
- fn current(&mut self) -> Result, DatabaseError> { - Ok(self.cursor.current()?.map(|(k, v)| TrieKey::StorageNode(k, v.nibbles))) + fn current(&mut self) -> Result, DatabaseError> { + Ok(self.cursor.current()?.map(|(_, v)| v.nibbles.0)) } } diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs new file mode 100644 index 000000000000..983974da38db --- /dev/null +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -0,0 +1,149 @@ +use super::{TrieCursor, TrieCursorFactory}; +use crate::{ + forward_cursor::ForwardInMemoryCursor, + updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, +}; +use reth_db::DatabaseError; +use reth_primitives::B256; +use reth_trie_common::{BranchNodeCompact, Nibbles}; +use std::collections::HashSet; + +/// The trie cursor factory for the trie updates. +#[derive(Debug, Clone)] +pub struct InMemoryTrieCursorFactory<'a, CF> { + /// Underlying trie cursor factory. + cursor_factory: CF, + /// Reference to sorted trie updates. + trie_updates: &'a TrieUpdatesSorted, +} + +impl<'a, CF> InMemoryTrieCursorFactory<'a, CF> { + /// Create a new trie cursor factory. 
+ pub const fn new(cursor_factory: CF, trie_updates: &'a TrieUpdatesSorted) -> Self { + Self { cursor_factory, trie_updates } + } +} + +impl<'a, CF: TrieCursorFactory> TrieCursorFactory for InMemoryTrieCursorFactory<'a, CF> { + type AccountTrieCursor = InMemoryAccountTrieCursor<'a, CF::AccountTrieCursor>; + type StorageTrieCursor = InMemoryStorageTrieCursor<'a, CF::StorageTrieCursor>; + + fn account_trie_cursor(&self) -> Result { + let cursor = self.cursor_factory.account_trie_cursor()?; + Ok(InMemoryAccountTrieCursor::new(cursor, self.trie_updates)) + } + + fn storage_trie_cursor( + &self, + hashed_address: B256, + ) -> Result { + let cursor = self.cursor_factory.storage_trie_cursor(hashed_address)?; + Ok(InMemoryStorageTrieCursor::new( + hashed_address, + cursor, + self.trie_updates.storage_tries.get(&hashed_address), + )) + } +} + +/// The cursor to iterate over account trie updates and corresponding database entries. +/// It will always give precedence to the data from the trie updates. +#[derive(Debug)] +#[allow(dead_code)] +pub struct InMemoryAccountTrieCursor<'a, C> { + /// The database cursor. + cursor: C, + /// Forward-only in-memory cursor over storage trie nodes. + in_memory_cursor: ForwardInMemoryCursor<'a, Nibbles, BranchNodeCompact>, + /// Collection of removed trie nodes. + removed_nodes: &'a HashSet, + /// Last key returned by the cursor. 
+ last_key: Option, +} + +impl<'a, C> InMemoryAccountTrieCursor<'a, C> { + const fn new(cursor: C, trie_updates: &'a TrieUpdatesSorted) -> Self { + let in_memory_cursor = ForwardInMemoryCursor::new(&trie_updates.account_nodes); + Self { + cursor, + in_memory_cursor, + removed_nodes: &trie_updates.removed_nodes, + last_key: None, + } + } +} + +impl<'a, C: TrieCursor> TrieCursor for InMemoryAccountTrieCursor<'a, C> { + fn seek_exact( + &mut self, + _key: Nibbles, + ) -> Result, DatabaseError> { + unimplemented!() + } + + fn seek( + &mut self, + _key: Nibbles, + ) -> Result, DatabaseError> { + unimplemented!() + } + + fn current(&mut self) -> Result, DatabaseError> { + unimplemented!() + } +} + +/// The cursor to iterate over storage trie updates and corresponding database entries. +/// It will always give precedence to the data from the trie updates. +#[derive(Debug)] +#[allow(dead_code)] +pub struct InMemoryStorageTrieCursor<'a, C> { + /// The hashed address of the account that trie belongs to. + hashed_address: B256, + /// The database cursor. + cursor: C, + /// Forward-only in-memory cursor over storage trie nodes. + in_memory_cursor: Option>, + /// Reference to the set of removed storage node keys. + removed_nodes: Option<&'a HashSet>, + /// The flag indicating whether the storage trie was cleared. + storage_trie_cleared: bool, + /// Last key returned by the cursor. 
+ last_key: Option, +} + +impl<'a, C> InMemoryStorageTrieCursor<'a, C> { + fn new(hashed_address: B256, cursor: C, updates: Option<&'a StorageTrieUpdatesSorted>) -> Self { + let in_memory_cursor = updates.map(|u| ForwardInMemoryCursor::new(&u.storage_nodes)); + let removed_nodes = updates.map(|u| &u.removed_nodes); + let storage_trie_cleared = updates.map_or(false, |u| u.is_deleted); + Self { + hashed_address, + cursor, + in_memory_cursor, + removed_nodes, + storage_trie_cleared, + last_key: None, + } + } +} + +impl<'a, C: TrieCursor> TrieCursor for InMemoryStorageTrieCursor<'a, C> { + fn seek_exact( + &mut self, + _key: Nibbles, + ) -> Result, DatabaseError> { + unimplemented!() + } + + fn seek( + &mut self, + _key: Nibbles, + ) -> Result, DatabaseError> { + unimplemented!() + } + + fn current(&mut self) -> Result, DatabaseError> { + unimplemented!() + } +} diff --git a/crates/trie/trie/src/trie_cursor/mod.rs b/crates/trie/trie/src/trie_cursor/mod.rs index aae7e773c690..e5160a5526a1 100644 --- a/crates/trie/trie/src/trie_cursor/mod.rs +++ b/crates/trie/trie/src/trie_cursor/mod.rs @@ -1,7 +1,14 @@ -use crate::{updates::TrieKey, BranchNodeCompact, Nibbles}; +use crate::{BranchNodeCompact, Nibbles}; use reth_db::DatabaseError; use reth_primitives::B256; + +/// Database implementations of trie cursors. mod database_cursors; + +/// In-memory implementations of trie cursors. +mod in_memory; + +/// Cursor for iterating over a subtrie. mod subnode; /// Noop trie cursor implementations. @@ -9,19 +16,25 @@ pub mod noop; pub use self::{ database_cursors::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}, + in_memory::*, subnode::CursorSubNode, }; /// Factory for creating trie cursors. pub trait TrieCursorFactory { + /// The account trie cursor type. + type AccountTrieCursor: TrieCursor; + /// The storage trie cursor type. + type StorageTrieCursor: TrieCursor; + /// Create an account trie cursor. 
- fn account_trie_cursor(&self) -> Result, DatabaseError>; + fn account_trie_cursor(&self) -> Result; /// Create a storage tries cursor. - fn storage_tries_cursor( + fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result, DatabaseError>; + ) -> Result; } /// A cursor for navigating a trie that works with both Tables and DupSort tables. @@ -38,5 +51,5 @@ pub trait TrieCursor: Send + Sync { -> Result, DatabaseError>; /// Get the current entry. - fn current(&mut self) -> Result, DatabaseError>; + fn current(&mut self) -> Result, DatabaseError>; } diff --git a/crates/trie/trie/src/trie_cursor/noop.rs b/crates/trie/trie/src/trie_cursor/noop.rs index 46163180b8b9..e49c90613d37 100644 --- a/crates/trie/trie/src/trie_cursor/noop.rs +++ b/crates/trie/trie/src/trie_cursor/noop.rs @@ -1,6 +1,7 @@ use super::{TrieCursor, TrieCursorFactory}; -use crate::{updates::TrieKey, BranchNodeCompact, Nibbles}; +use crate::{BranchNodeCompact, Nibbles}; use reth_db::DatabaseError; +use reth_primitives::B256; /// Noop trie cursor factory. #[derive(Default, Debug)] @@ -8,17 +9,20 @@ use reth_db::DatabaseError; pub struct NoopTrieCursorFactory; impl TrieCursorFactory for NoopTrieCursorFactory { + type AccountTrieCursor = NoopAccountTrieCursor; + type StorageTrieCursor = NoopStorageTrieCursor; + /// Generates a Noop account trie cursor. - fn account_trie_cursor(&self) -> Result, DatabaseError> { - Ok(Box::::default()) + fn account_trie_cursor(&self) -> Result { + Ok(NoopAccountTrieCursor::default()) } /// Generates a Noop storage trie cursor. - fn storage_tries_cursor( + fn storage_trie_cursor( &self, - _hashed_address: reth_primitives::B256, - ) -> Result, DatabaseError> { - Ok(Box::::default()) + _hashed_address: B256, + ) -> Result { + Ok(NoopStorageTrieCursor::default()) } } @@ -45,7 +49,7 @@ impl TrieCursor for NoopAccountTrieCursor { } /// Retrieves the current cursor position within the account trie. 
- fn current(&mut self) -> Result, DatabaseError> { + fn current(&mut self) -> Result, DatabaseError> { Ok(None) } } @@ -73,7 +77,7 @@ impl TrieCursor for NoopStorageTrieCursor { } /// Retrieves the current cursor position within storage tries. - fn current(&mut self) -> Result, DatabaseError> { + fn current(&mut self) -> Result, DatabaseError> { Ok(None) } } diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/trie/src/updates.rs index 0056d8a9e4e9..eba5d1963d78 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/trie/src/updates.rs @@ -2,183 +2,340 @@ use crate::{ walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles, StorageTrieEntry, StoredBranchNode, StoredNibbles, StoredNibblesSubKey, }; -use derive_more::Deref; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, transaction::{DbTx, DbTxMut}, }; use reth_primitives::B256; -use std::collections::{hash_map::IntoIter, HashMap, HashSet}; - -/// The key of a trie node. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum TrieKey { - /// A node in the account trie. - AccountNode(StoredNibbles), - /// A node in the storage trie. - StorageNode(B256, StoredNibblesSubKey), - /// Storage trie of an account. - StorageTrie(B256), -} - -/// The operation to perform on the trie. -#[derive(PartialEq, Eq, Debug, Clone)] -pub enum TrieOp { - /// Delete the node entry. - Delete, - /// Update the node entry with the provided value. - Update(BranchNodeCompact), -} - -impl TrieOp { - /// Returns `true` if the operation is an update. - pub const fn is_update(&self) -> bool { - matches!(self, Self::Update(..)) - } -} +use std::collections::{HashMap, HashSet}; /// The aggregation of trie updates. 
-#[derive(Debug, Default, Clone, PartialEq, Eq, Deref)] +#[derive(PartialEq, Eq, Clone, Default, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct TrieUpdates { - trie_operations: HashMap, + pub(crate) account_nodes: HashMap, + pub(crate) removed_nodes: HashSet, + pub(crate) storage_tries: HashMap, } -impl From<[(TrieKey, TrieOp); N]> for TrieUpdates { - fn from(value: [(TrieKey, TrieOp); N]) -> Self { - Self { trie_operations: HashMap::from(value) } +impl TrieUpdates { + /// Returns `true` if the updates are empty. + pub fn is_empty(&self) -> bool { + self.account_nodes.is_empty() && + self.removed_nodes.is_empty() && + self.storage_tries.is_empty() } -} - -impl IntoIterator for TrieUpdates { - type Item = (TrieKey, TrieOp); - type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.trie_operations.into_iter() + /// Returns reference to updated account nodes. + pub const fn account_nodes_ref(&self) -> &HashMap { + &self.account_nodes } -} -impl TrieUpdates { - /// Schedule a delete operation on a trie key. - /// - /// # Panics - /// - /// If the key already exists and the operation is an update. - pub fn schedule_delete(&mut self, key: TrieKey) { - let existing = self.trie_operations.insert(key, TrieOp::Delete); - if let Some(op) = existing { - assert!(!op.is_update(), "Tried to delete a node that was already updated"); - } + /// Returns a reference to removed account nodes. + pub const fn removed_nodes_ref(&self) -> &HashSet { + &self.removed_nodes } - /// Extend the updates with trie updates. - pub fn extend(&mut self, updates: impl IntoIterator) { - self.trie_operations.extend(updates); + /// Returns a reference to updated storage tries. + pub const fn storage_tries_ref(&self) -> &HashMap { + &self.storage_tries } - /// Extend the updates with account trie updates. 
- pub fn extend_with_account_updates(&mut self, updates: HashMap) { - self.extend( - updates.into_iter().map(|(nibbles, node)| { - (TrieKey::AccountNode(nibbles.into()), TrieOp::Update(node)) - }), - ); + /// Insert storage updates for a given hashed address. + pub fn insert_storage_updates( + &mut self, + hashed_address: B256, + storage_updates: StorageTrieUpdates, + ) { + let existing = self.storage_tries.insert(hashed_address, storage_updates); + debug_assert!(existing.is_none()); } /// Finalize state trie updates. - pub fn finalize_state_updates( + pub fn finalize( &mut self, walker: TrieWalker, hash_builder: HashBuilder, destroyed_accounts: HashSet, ) { - // Add updates from trie walker. - let (_, walker_updates) = walker.split(); - self.extend(walker_updates); + // Retrieve deleted keys from trie walker. + let (_, removed_node_keys) = walker.split(); + self.removed_nodes.extend(removed_node_keys); - // Add account node updates from hash builder. - let (_, hash_builder_updates) = hash_builder.split(); - self.extend_with_account_updates(hash_builder_updates); + // Retrieve updated nodes from hash builder. + let (_, updated_nodes) = hash_builder.split(); + self.account_nodes.extend(updated_nodes); // Add deleted storage tries for destroyed accounts. - self.extend( - destroyed_accounts.into_iter().map(|key| (TrieKey::StorageTrie(key), TrieOp::Delete)), - ); + for destroyed in destroyed_accounts { + self.storage_tries.entry(destroyed).or_default().set_deleted(true); + } } - /// Finalize storage trie updates for a given address. - pub fn finalize_storage_updates( - &mut self, - hashed_address: B256, - walker: TrieWalker, - hash_builder: HashBuilder, - ) { - // Add updates from trie walker. - let (_, walker_updates) = walker.split(); - self.extend(walker_updates); - - // Add storage node updates from hash builder. 
- let (_, hash_builder_updates) = hash_builder.split(); - self.extend(hash_builder_updates.into_iter().map(|(nibbles, node)| { - (TrieKey::StorageNode(hashed_address, nibbles.into()), TrieOp::Update(node)) - })); + /// Converts trie updates into [`TrieUpdatesSorted`]. + pub fn into_sorted(self) -> TrieUpdatesSorted { + let mut account_nodes = Vec::from_iter(self.account_nodes); + account_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + let storage_tries = self + .storage_tries + .into_iter() + .map(|(hashed_address, updates)| (hashed_address, updates.into_sorted())) + .collect(); + TrieUpdatesSorted { removed_nodes: self.removed_nodes, account_nodes, storage_tries } } /// Flush updates all aggregated updates to the database. - pub fn flush(self, tx: &(impl DbTx + DbTxMut)) -> Result<(), reth_db::DatabaseError> { - if self.trie_operations.is_empty() { - return Ok(()); + /// + /// # Returns + /// + /// The number of storage trie entries updated in the database. + pub fn write_to_database(self, tx: &TX) -> Result + where + TX: DbTx + DbTxMut, + { + if self.is_empty() { + return Ok(0) } - let mut account_trie_cursor = tx.cursor_write::()?; - let mut storage_trie_cursor = tx.cursor_dup_write::()?; + // Track the number of inserted entries. 
+ let mut num_entries = 0; - let mut trie_operations = Vec::from_iter(self.trie_operations); - trie_operations.sort_unstable_by(|a, b| a.0.cmp(&b.0)); - for (key, operation) in trie_operations { - match key { - TrieKey::AccountNode(nibbles) => match operation { - TrieOp::Delete => { - if account_trie_cursor.seek_exact(nibbles)?.is_some() { - account_trie_cursor.delete_current()?; - } - } - TrieOp::Update(node) => { - if !nibbles.0.is_empty() { - account_trie_cursor.upsert(nibbles, StoredBranchNode(node))?; - } - } - }, - TrieKey::StorageTrie(hashed_address) => match operation { - TrieOp::Delete => { - if storage_trie_cursor.seek_exact(hashed_address)?.is_some() { - storage_trie_cursor.delete_current_duplicates()?; - } + // Merge updated and removed nodes. Updated nodes must take precedence. + let mut account_updates = self + .removed_nodes + .into_iter() + .filter_map(|n| (!self.account_nodes.contains_key(&n)).then_some((n, None))) + .collect::>(); + account_updates + .extend(self.account_nodes.into_iter().map(|(nibbles, node)| (nibbles, Some(node)))); + // Sort trie node updates. + account_updates.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + let mut account_trie_cursor = tx.cursor_write::()?; + for (key, updated_node) in account_updates { + let nibbles = StoredNibbles(key); + match updated_node { + Some(node) => { + if !nibbles.0.is_empty() { + num_entries += 1; + account_trie_cursor.upsert(nibbles, StoredBranchNode(node))?; } - TrieOp::Update(..) => unreachable!("Cannot update full storage trie."), - }, - TrieKey::StorageNode(hashed_address, nibbles) => { - if !nibbles.is_empty() { - // Delete the old entry if it exists. - if storage_trie_cursor - .seek_by_key_subkey(hashed_address, nibbles.clone())? - .filter(|e| e.nibbles == nibbles) - .is_some() - { - storage_trie_cursor.delete_current()?; - } - - // The operation is an update, insert new entry. 
- if let TrieOp::Update(node) = operation { - storage_trie_cursor - .upsert(hashed_address, StorageTrieEntry { nibbles, node })?; - } + } + None => { + num_entries += 1; + if account_trie_cursor.seek_exact(nibbles)?.is_some() { + account_trie_cursor.delete_current()?; } } - }; + } + } + + let mut storage_tries = Vec::from_iter(self.storage_tries); + storage_tries.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + let mut storage_trie_cursor = tx.cursor_dup_write::()?; + for (hashed_address, storage_trie_updates) in storage_tries { + let updated_storage_entries = + storage_trie_updates.write_with_cursor(&mut storage_trie_cursor, hashed_address)?; + num_entries += updated_storage_entries; + } + + Ok(num_entries) + } +} + +/// Trie updates for storage trie of a single account. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct StorageTrieUpdates { + /// Flag indicating whether the trie was deleted. + pub(crate) is_deleted: bool, + /// Collection of updated storage trie nodes. + pub(crate) storage_nodes: HashMap, + /// Collection of removed storage trie nodes. + pub(crate) removed_nodes: HashSet, +} + +impl StorageTrieUpdates { + /// Returns empty storage trie updates with `deleted` set to `true`. + pub fn deleted() -> Self { + Self { + is_deleted: true, + storage_nodes: HashMap::default(), + removed_nodes: HashSet::default(), + } + } + + /// Returns the length of updated nodes. + pub fn len(&self) -> usize { + (self.is_deleted as usize) + self.storage_nodes.len() + self.removed_nodes.len() + } + + /// Returns `true` if the trie was deleted. + pub const fn is_deleted(&self) -> bool { + self.is_deleted + } + + /// Returns reference to updated storage nodes. + pub const fn storage_nodes_ref(&self) -> &HashMap { + &self.storage_nodes + } + + /// Returns reference to removed storage nodes. 
+ pub const fn removed_nodes_ref(&self) -> &HashSet { + &self.removed_nodes + } + + /// Returns `true` if storage updates are empty. + pub fn is_empty(&self) -> bool { + !self.is_deleted && self.storage_nodes.is_empty() && self.removed_nodes.is_empty() + } + + /// Sets `deleted` flag on the storage trie. + pub fn set_deleted(&mut self, deleted: bool) { + self.is_deleted = deleted; + } + + /// Finalize storage trie updates for by taking updates from walker and hash builder. + pub fn finalize(&mut self, walker: TrieWalker, hash_builder: HashBuilder) { + // Retrieve deleted keys from trie walker. + let (_, removed_keys) = walker.split(); + self.removed_nodes.extend(removed_keys); + + // Retrieve updated nodes from hash builder. + let (_, updated_nodes) = hash_builder.split(); + self.storage_nodes.extend(updated_nodes); + } + + /// Convert storage trie updates into [`StorageTrieUpdatesSorted`]. + pub fn into_sorted(self) -> StorageTrieUpdatesSorted { + let mut storage_nodes = Vec::from_iter(self.storage_nodes); + storage_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + StorageTrieUpdatesSorted { + is_deleted: self.is_deleted, + removed_nodes: self.removed_nodes, + storage_nodes, + } + } + + /// Initializes a storage trie cursor and writes updates to database. + pub fn write_to_database( + self, + tx: &TX, + hashed_address: B256, + ) -> Result + where + TX: DbTx + DbTxMut, + { + if self.is_empty() { + return Ok(0) } - Ok(()) + let mut cursor = tx.cursor_dup_write::()?; + self.write_with_cursor(&mut cursor, hashed_address) + } + + /// Writes updates to database. + /// + /// # Returns + /// + /// The number of storage trie entries updated in the database. + fn write_with_cursor( + self, + cursor: &mut C, + hashed_address: B256, + ) -> Result + where + C: DbCursorRO + + DbCursorRW + + DbDupCursorRO + + DbDupCursorRW, + { + // The storage trie for this account has to be deleted. 
+ if self.is_deleted && cursor.seek_exact(hashed_address)?.is_some() { + cursor.delete_current_duplicates()?; + } + + // Merge updated and removed nodes. Updated nodes must take precedence. + let mut storage_updates = self + .removed_nodes + .into_iter() + .filter_map(|n| (!self.storage_nodes.contains_key(&n)).then_some((n, None))) + .collect::>(); + storage_updates + .extend(self.storage_nodes.into_iter().map(|(nibbles, node)| (nibbles, Some(node)))); + // Sort trie node updates. + storage_updates.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + let mut num_entries = 0; + for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) { + num_entries += 1; + let nibbles = StoredNibblesSubKey(nibbles); + // Delete the old entry if it exists. + if cursor + .seek_by_key_subkey(hashed_address, nibbles.clone())? + .filter(|e| e.nibbles == nibbles) + .is_some() + { + cursor.delete_current()?; + } + + // There is an updated version of this node, insert new entry. + if let Some(node) = maybe_updated { + cursor.upsert(hashed_address, StorageTrieEntry { nibbles, node })?; + } + } + + Ok(num_entries) + } +} + +/// Sorted trie updates used for lookups and insertions. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +pub struct TrieUpdatesSorted { + pub(crate) account_nodes: Vec<(Nibbles, BranchNodeCompact)>, + pub(crate) removed_nodes: HashSet, + pub(crate) storage_tries: HashMap, +} + +impl TrieUpdatesSorted { + /// Returns reference to updated account nodes. + pub fn account_nodes_ref(&self) -> &[(Nibbles, BranchNodeCompact)] { + &self.account_nodes + } + + /// Returns reference to removed account nodes. + pub const fn removed_nodes_ref(&self) -> &HashSet { + &self.removed_nodes + } + + /// Returns reference to updated storage tries. + pub const fn storage_tries_ref(&self) -> &HashMap { + &self.storage_tries + } +} + +/// Sorted trie updates used for lookups and insertions. 
+#[derive(PartialEq, Eq, Clone, Default, Debug)] +pub struct StorageTrieUpdatesSorted { + pub(crate) is_deleted: bool, + pub(crate) storage_nodes: Vec<(Nibbles, BranchNodeCompact)>, + pub(crate) removed_nodes: HashSet, +} + +impl StorageTrieUpdatesSorted { + /// Returns `true` if the trie was deleted. + pub const fn is_deleted(&self) -> bool { + self.is_deleted + } + + /// Returns reference to updated storage nodes. + pub fn storage_nodes_ref(&self) -> &[(Nibbles, BranchNodeCompact)] { + &self.storage_nodes + } + + /// Returns reference to removed storage nodes. + pub const fn removed_nodes_ref(&self) -> &HashSet { + &self.removed_nodes } } diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index 9959f4f95a5c..f88010e50949 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -1,11 +1,11 @@ use crate::{ prefix_set::PrefixSet, trie_cursor::{CursorSubNode, TrieCursor}, - updates::TrieUpdates, BranchNodeCompact, Nibbles, }; use reth_db::DatabaseError; use reth_primitives::B256; +use std::collections::HashSet; /// `TrieWalker` is a structure that enables traversal of a Merkle trie. /// It allows moving through the trie in a depth-first manner, skipping certain branches @@ -22,36 +22,31 @@ pub struct TrieWalker { pub can_skip_current_node: bool, /// A `PrefixSet` representing the changes to be applied to the trie. pub changes: PrefixSet, - /// The trie updates to be applied to the trie. - trie_updates: Option, + /// The retained trie node keys that need to be removed. + removed_keys: Option>, } impl TrieWalker { /// Constructs a new `TrieWalker` from existing stack and a cursor. 
pub fn from_stack(cursor: C, stack: Vec, changes: PrefixSet) -> Self { let mut this = - Self { cursor, changes, stack, can_skip_current_node: false, trie_updates: None }; + Self { cursor, changes, stack, can_skip_current_node: false, removed_keys: None }; this.update_skip_node(); this } /// Sets the flag whether the trie updates should be stored. - pub fn with_updates(mut self, retain_updates: bool) -> Self { - self.set_updates(retain_updates); - self - } - - /// Sets the flag whether the trie updates should be stored. - pub fn set_updates(&mut self, retain_updates: bool) { - if retain_updates { - self.trie_updates = Some(TrieUpdates::default()); + pub fn with_deletions_retained(mut self, retained: bool) -> Self { + if retained { + self.removed_keys = Some(HashSet::default()); } + self } /// Split the walker into stack and trie updates. - pub fn split(mut self) -> (Vec, TrieUpdates) { - let trie_updates = self.trie_updates.take(); - (self.stack, trie_updates.unwrap_or_default()) + pub fn split(mut self) -> (Vec, HashSet) { + let keys = self.removed_keys.take(); + (self.stack, keys.unwrap_or_default()) } /// Prints the current stack of trie nodes. @@ -63,9 +58,9 @@ impl TrieWalker { println!("====================== END STACK ======================\n"); } - /// The current length of the trie updates. - pub fn updates_len(&self) -> usize { - self.trie_updates.as_ref().map(|u| u.len()).unwrap_or(0) + /// The current length of the removed keys. + pub fn removed_keys_len(&self) -> usize { + self.removed_keys.as_ref().map_or(0, |u| u.len()) } /// Returns the current key in the trie. @@ -117,7 +112,7 @@ impl TrieWalker { changes, stack: vec![CursorSubNode::default()], can_skip_current_node: false, - trie_updates: None, + removed_keys: None, }; // Set up the root node of the trie in the stack, if it exists. @@ -193,8 +188,8 @@ impl TrieWalker { // Delete the current node if it's included in the prefix set or it doesn't contain the root // hash. 
if !self.can_skip_current_node || nibble != -1 { - if let Some((updates, key)) = self.trie_updates.as_mut().zip(self.cursor.current()?) { - updates.schedule_delete(key); + if let Some((keys, key)) = self.removed_keys.as_mut().zip(self.cursor.current()?) { + keys.insert(key); } } diff --git a/crates/trie/types/src/account.rs b/crates/trie/types/src/account.rs deleted file mode 100644 index 480a8c6a69e8..000000000000 --- a/crates/trie/types/src/account.rs +++ /dev/null @@ -1,22 +0,0 @@ -use alloy_primitives::{B256, U256}; -use alloy_rlp::{RlpDecodable, RlpEncodable}; - -/// An Ethereum account as represented in the trie. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable)] -pub struct TrieAccount { - /// Account nonce. - pub nonce: u64, - /// Account balance. - pub balance: U256, - /// Account's storage root. - pub storage_root: B256, - /// Hash of the account's bytecode. - pub code_hash: B256, -} - -impl TrieAccount { - /// Get account's storage root. - pub const fn storage_root(&self) -> B256 { - self.storage_root - } -} diff --git a/deny.toml b/deny.toml index 4dff1b9ce250..431698495969 100644 --- a/deny.toml +++ b/deny.toml @@ -43,6 +43,7 @@ allow = [ "Unicode-DFS-2016", "Unlicense", "Unicode-3.0", + "Zlib", # https://github.com/briansmith/ring/issues/902 "LicenseRef-ring", # https://github.com/rustls/webpki/blob/main/LICENSE ISC Style @@ -63,7 +64,6 @@ exceptions = [ { allow = ["CC0-1.0"], name = "aurora-engine-modexp" }, # TODO: decide on MPL-2.0 handling # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980 - { allow = ["MPL-2.0"], name = "attohttpc" }, { allow = ["MPL-2.0"], name = "option-ext" }, { allow = ["MPL-2.0"], name = "webpki-roots" }, ] diff --git a/docs/crates/db.md b/docs/crates/db.md index b08383b7a6d9..2c2977b0674a 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -8,7 +8,7 @@ The database is a central component to Reth, enabling persistent storage for dat Within Reth, 
the database is organized via "tables". A table is any struct that implements the `Table` trait. -[File: crates/storage/db/src/abstraction/table.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/storage/db/src/abstraction/table.rs#L55-L82) +[File: crates/storage/db-api/src/table.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/table.rs#L64-L93) ```rust ignore pub trait Table: Send + Sync + Debug + 'static { @@ -32,7 +32,7 @@ pub trait Value: Compress + Decompress + Serialize {} The `Table` trait has two generic values, `Key` and `Value`, which need to implement the `Key` and `Value` traits, respectively. The `Encode` trait is responsible for transforming data into bytes so it can be stored in the database, while the `Decode` trait transforms the bytes back into its original form. Similarly, the `Compress` and `Decompress` traits transform the data to and from a compressed format when storing or reading data from the database. -There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/storage/db/src/tables/mod.rs#L161-L188) if you would like to see the table definitions for any of the tables below. +There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db/src/tables/mod.rs#L274-L414) if you would like to see the table definitions for any of the tables below. 
- CanonicalHeaders - HeaderTerminalDifficulties @@ -41,18 +41,18 @@ There are many tables within the node, all used to store different types of data - BlockBodyIndices - BlockOmmers - BlockWithdrawals -- TransactionBlocks - Transactions - TransactionHashNumbers +- TransactionBlocks - Receipts +- Bytecodes - PlainAccountState - PlainStorageState -- Bytecodes - AccountsHistory - StoragesHistory - AccountChangeSets - StorageChangeSets -- HashedAccount +- HashedAccounts - HashedStorages - AccountsTrie - StoragesTrie @@ -60,28 +60,41 @@ There are many tables within the node, all used to store different types of data - StageCheckpoints - StageCheckpointProgresses - PruneCheckpoints +- VersionHistory +- BlockRequests +- ChainState
## Database -Reth's database design revolves around it's main [Database trait](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21), which implements the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. +Reth's database design revolves around it's main [Database trait](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L8-L52), which implements the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. -[File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21) +[File: crates/storage/db-api/src/database.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L8-L52) ```rust ignore -/// Main Database trait that spawns transactions to be executed. -pub trait Database { - /// RO database transaction - type TX: DbTx + Send + Sync + Debug; - /// RW database transaction - type TXMut: DbTxMut + DbTx + TableImporter + Send + Sync + Debug; +/// Main Database trait that can open read-only and read-write transactions. +/// +/// Sealed trait which cannot be implemented by 3rd parties, exposed only for consumption. +pub trait Database: Send + Sync { + /// Read-Only database transaction + type TX: DbTx + Send + Sync + Debug + 'static; + /// Read-Write database transaction + type TXMut: DbTxMut + DbTx + TableImporter + Send + Sync + Debug + 'static; + + /// Create read only transaction. + #[track_caller] + fn tx(&self) -> Result; + + /// Create read write transaction only possible if database is open with write access. 
+ #[track_caller] + fn tx_mut(&self) -> Result; /// Takes a function and passes a read-only transaction into it, making sure it's closed in the /// end of the execution. - fn view(&self, f: F) -> Result + fn view(&self, f: F) -> Result where - F: Fn(&::TX) -> T, + F: FnOnce(&Self::TX) -> T, { let tx = self.tx()?; @@ -93,9 +106,9 @@ pub trait Database { /// Takes a function and passes a write-read transaction into it, making sure it's committed in /// the end of the execution. - fn update(&self, f: F) -> Result + fn update(&self, f: F) -> Result where - F: Fn(&::TXMut) -> T, + F: FnOnce(&Self::TXMut) -> T, { let tx = self.tx_mut()?; @@ -135,183 +148,183 @@ where The `Database` defines two associated types `TX` and `TXMut`. -[File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/database.rs#L11) +[File: crates/storage/db-api/src/database.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L54-L78) The `TX` type can be any type that implements the `DbTx` trait, which provides a set of functions to interact with read only transactions. 
-[File: crates/storage/db/src/abstraction/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/transaction.rs#L36) +[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/transaction.rs#L7-L29) ```rust ignore /// Read only transaction pub trait DbTx: Send + Sync { /// Cursor type for this read-only transaction type Cursor: DbCursorRO + Send + Sync; - /// DupCursor type for this read-only transaction + /// `DupCursor` type for this read-only transaction type DupCursor: DbDupCursorRO + DbCursorRO + Send + Sync; /// Get value - fn get(&self, key: T::Key) -> Result, Error>; + fn get(&self, key: T::Key) -> Result, DatabaseError>; /// Commit for read only transaction will consume and free transaction and allows /// freeing of memory pages - fn commit(self) -> Result; + fn commit(self) -> Result; + /// Aborts transaction + fn abort(self); /// Iterate over read only values in table. - fn cursor(&self) -> Result, Error>; + fn cursor_read(&self) -> Result, DatabaseError>; /// Iterate over read only values in dup sorted table. - fn cursor_dup(&self) -> Result, Error>; + fn cursor_dup_read(&self) -> Result, DatabaseError>; + /// Returns number of entries in the table. + fn entries(&self) -> Result; + /// Disables long-lived read transaction safety guarantees. + fn disable_long_read_transaction_safety(&mut self); } ``` The `TXMut` type can be any type that implements the `DbTxMut` trait, which provides a set of functions to interact with read/write transactions and the associated cursor types. 
-[File: crates/storage/db/src/abstraction/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/transaction.rs#L49) +[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/transaction.rs#L31-L54) ```rust ignore /// Read write transaction that allows writing to database pub trait DbTxMut: Send + Sync { /// Read-Write Cursor type type CursorMut: DbCursorRW + DbCursorRO + Send + Sync; - /// Read-Write DupCursor type + /// Read-Write `DupCursor` type type DupCursorMut: DbDupCursorRW + DbCursorRW + DbDupCursorRO + DbCursorRO + Send + Sync; + /// Put value to database - fn put(&self, key: T::Key, value: T::Value) -> Result<(), Error>; + fn put(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; /// Delete value from database - fn delete(&self, key: T::Key, value: Option) -> Result; + fn delete(&self, key: T::Key, value: Option) + -> Result; /// Clears database. - fn clear(&self) -> Result<(), Error>; - /// Cursor for writing - fn cursor_write(&self) -> Result, Error>; - /// DupCursor for writing - fn cursor_dup_write( - &self, - ) -> Result, Error>; + fn clear(&self) -> Result<(), DatabaseError>; + /// Cursor mut + fn cursor_write(&self) -> Result, DatabaseError>; + /// `DupCursor` mut. + fn cursor_dup_write(&self) -> Result, DatabaseError>; } ``` -Lets take a look at the `DbTx` and `DbTxMut` traits in action. Revisiting the `Transaction` struct as an example, the `Transaction::get_block_hash()` method uses the `DbTx::get()` function to get a block header hash in the form of `self.get::(number)`. +Let's take a look at the `DbTx` and `DbTxMut` traits in action. 
-[File: crates/storage/provider/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/provider/src/transaction.rs#L106) +Revisiting the `DatabaseProvider` struct as an exampl, the `DatabaseProvider::header_by_number()` function uses the `DbTx::get()` function to get a header from the `Headers` table. -```rust ignore +[File: crates/storage/provider/src/providers/database/provider.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/provider/src/providers/database/provider.rs#L1319-L1336) -impl<'this, DB> Transaction<'this, DB> -where - DB: Database, -{ +```rust ignore +impl HeaderProvider for DatabaseProvider { //--snip-- - /// Query [tables::CanonicalHeaders] table for block hash by block number - pub(crate) fn get_block_hash(&self, number: BlockNumber) -> Result { - let hash = self - .get::(number)? - .ok_or(ProviderError::CanonicalHash { number })?; - Ok(hash) + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.static_file_provider.get_with_static_file_or_database( + StaticFileSegment::Headers, + num, + |static_file| static_file.header_by_number(num), + || Ok(self.tx.get::(num)?), + ) } - //--snip-- -} -//--snip-- -impl<'a, DB: Database> Deref for Transaction<'a, DB> { - type Target = ::TXMut; - fn deref(&self) -> &Self::Target { - self.tx.as_ref().expect("Tried getting a reference to a non-existent transaction") - } + //--snip-- } ``` -The `Transaction` struct implements the `Deref` trait, which returns a reference to its `tx` field, which is a `TxMut`. Recall that `TxMut` is a generic type on the `Database` trait, which is defined as `type TXMut: DbTxMut + DbTx + Send + Sync;`, giving it access to all of the functions available to `DbTx`, including the `DbTx::get()` function. - Notice that the function uses a [turbofish](https://techblog.tonsser.com/posts/what-is-rusts-turbofish) to define which table to use when passing in the `key` to the `DbTx::get()` function. 
Taking a quick look at the function definition, a generic `T` is defined that implements the `Table` trait mentioned at the beginning of this chapter. -[File: crates/storage/db/src/abstraction/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/transaction.rs#L38) +[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/transaction.rs#L15) ```rust ignore -fn get(&self, key: T::Key) -> Result, Error>; +fn get(&self, key: T::Key) -> Result, DatabaseError>; ``` This design pattern is very powerful and allows Reth to use the methods available to the `DbTx` and `DbTxMut` traits without having to define implementation blocks for each table within the database. -Lets take a look at a couple examples before moving on. In the snippet below, the `DbTxMut::put()` method is used to insert values into the `CanonicalHeaders`, `Headers` and `HeaderNumbers` tables. +Let's take a look at a couple examples before moving on. In the snippet below, the `DbTxMut::put()` method is used to insert values into the `CanonicalHeaders`, `Headers` and `HeaderNumbers` tables. -[File: crates/storage/provider/src/block.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/provider/src/block.rs#L121-L125) +[File: crates/storage/provider/src/providers/database/provider.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/provider/src/providers/database/provider.rs#L2606-L2745) ```rust ignore - tx.put::(block.number, block.hash())?; - // Put header with canonical hashes. 
- tx.put::(block.number, block.header.as_ref().clone())?; - tx.put::(block.hash(), block.number)?; +self.tx.put::(block_number, block.hash())?; +self.tx.put::(block_number, block.header.as_ref().clone())?; +self.tx.put::(block.hash(), block_number)?; ``` +Let's take a look at the `DatabaseProviderRW` struct, which is used to create a mutable transaction to interact with the database. +The `DatabaseProviderRW` struct implements the `Deref` and `DerefMut` trait, which returns a reference to its first field, which is a `TxMut`. Recall that `TxMut` is a generic type on the `Database` trait, which is defined as `type TXMut: DbTxMut + DbTx + Send + Sync;`, giving it access to all of the functions available to `DbTx`, including the `DbTx::get()` function. + This next example uses the `DbTx::cursor()` method to get a `Cursor`. The `Cursor` type provides a way to traverse through rows in a database table, one row at a time. A cursor enables the program to perform an operation (updating, deleting, etc) on each row in the table individually. The following code snippet gets a cursor for a few different tables in the database. -[File: crates/stages/src/stages/execution.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stages/execution.rs#L93-L101) +[File: crates/static-file/static-file/src/segments/headers.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/static-file/static-file/src/segments/headers.rs#L22-L58) ```rust ignore -// Get next canonical block hashes to execute. - let mut canonicals = db_tx.cursor_read::()?; - // Get header with canonical hashes. - let mut headers = db_tx.cursor_read::()?; - // Get bodies (to get tx index) with canonical hashes. - let mut cumulative_tx_count = db_tx.cursor_read::()?; - // Get transaction of the block that we are executing. - let mut tx = db_tx.cursor_read::()?; - // Skip sender recovery and load signer from database. 
- let mut tx_sender = db_tx.cursor_read::()?; - +# Get a cursor for the Headers table +let mut headers_cursor = provider.tx_ref().cursor_read::()?; +# Then we can walk the cursor to get the headers for a specific block range +let headers_walker = headers_cursor.walk_range(block_range.clone())?; ``` Lets look at an examples of how cursors are used. The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate. This function is responsible for unwinding any changes to the database if there is an error when executing the body stage within the Reth pipeline. -[File: crates/stages/src/stages/bodies.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stages/bodies.rs#L205-L238) +[File: crates/stages/stages/src/stages/bodies.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/stages/stages/src/stages/bodies.rs#L267-L345) ```rust ignore - /// Unwind the stage. - async fn unwind( - &mut self, - db: &mut Transaction<'_, DB>, - input: UnwindInput, - ) -> Result> { - let mut tx_count_cursor = db.cursor_write::()?; - let mut block_ommers_cursor = db.cursor_write::()?; - let mut transaction_cursor = db.cursor_write::()?; - - let mut entry = tx_count_cursor.last()?; - while let Some((key, count)) = entry { - if key.number() <= input.unwind_to { - break - } - - tx_count_cursor.delete_current()?; - entry = tx_count_cursor.prev()?; - - if block_ommers_cursor.seek_exact(key)?.is_some() { - block_ommers_cursor.delete_current()?; - } - - let prev_count = entry.map(|(_, v)| v).unwrap_or_default(); - for tx_id in prev_count..count { - if transaction_cursor.seek_exact(tx_id)?.is_some() { - transaction_cursor.delete_current()?; - } - } +/// Unwind the stage. 
+fn unwind(&mut self, provider: &DatabaseProviderRW, input: UnwindInput) { + self.buffer.take(); + + let static_file_provider = provider.static_file_provider(); + let tx = provider.tx_ref(); + // Cursors to unwind bodies, ommers + let mut body_cursor = tx.cursor_write::()?; + let mut ommers_cursor = tx.cursor_write::()?; + let mut withdrawals_cursor = tx.cursor_write::()?; + let mut requests_cursor = tx.cursor_write::()?; + // Cursors to unwind transitions + let mut tx_block_cursor = tx.cursor_write::()?; + + let mut rev_walker = body_cursor.walk_back(None)?; + while let Some((number, block_meta)) = rev_walker.next().transpose()? { + if number <= input.unwind_to { + break } - //--snip-- - } + // Delete the ommers entry if any + if ommers_cursor.seek_exact(number)?.is_some() { + ommers_cursor.delete_current()?; + } -``` + // Delete the withdrawals entry if any + if withdrawals_cursor.seek_exact(number)?.is_some() { + withdrawals_cursor.delete_current()?; + } -This function first grabs a mutable cursor for the `CumulativeTxCount`, `BlockOmmers` and `Transactions` tables. + // Delete the requests entry if any + if requests_cursor.seek_exact(number)?.is_some() { + requests_cursor.delete_current()?; + } + + // Delete all transaction to block values. + if !block_meta.is_empty() && + tx_block_cursor.seek_exact(block_meta.last_tx_num())?.is_some() + { + tx_block_cursor.delete_current()?; + } -The `tx_count_cursor` is used to get the last key value pair written to the `CumulativeTxCount` table and delete key value pair where the cursor is currently pointing. + // Delete the current body value + rev_walker.delete_current()?; + } + //--snip-- +} +``` -The `block_ommers_cursor` is used to get the block ommers from the `BlockOmmers` table at the specified key, and delete the entry where the cursor is currently pointing. +This function first grabs a mutable cursor for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals`, `BlockRequests`, `TransactionBlocks` tables. 
-Finally, the `transaction_cursor` is used to get delete each transaction from the last `TXNumber` written to the database, to the current tx count. +Then it gets a walker of the block body cursor, and then walk backwards through the cursor to delete the block body entries from the last block number to the block number specified in the `UnwindInput` struct. While this is a brief look at how cursors work in the context of database tables, the chapter on the `libmdbx` crate will go into further detail on how cursors communicate with the database and what is actually happening under the hood. diff --git a/docs/crates/discv4.md b/docs/crates/discv4.md index 5abe7c439b99..348c68e06b13 100644 --- a/docs/crates/discv4.md +++ b/docs/crates/discv4.md @@ -126,7 +126,7 @@ The `NodeRecord::from_secret_key()` takes the socket address used for discovery If the `discv4_config` supplied to the `Discovery::new()` function is `None`, the discv4 service will not be spawned. In this case, no new peers will be discovered across the network. The node will have to rely on manually added peers. However, if the `discv4_config` contains a `Some(Discv4Config)` value, then the `Discv4::bind()` function is called to bind to a new UdpSocket and create the disc_v4 service. -[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/lib.rs#L188) +[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/discv4/src/lib.rs#L178) ```rust ignore impl Discv4 { //--snip-- @@ -155,7 +155,7 @@ impl Discv4 { To better understand what is actually happening when the disc_v4 service is created, lets take a deeper look at the `Discv4Service::new()` function. 
-[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/lib.rs#L392) +[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/discv4/src/lib.rs#L495) ```rust ignore impl Discv4Service { /// Create a new instance for a bound [`UdpSocket`]. @@ -216,7 +216,7 @@ In Rust, the owner of a [`Future`](https://doc.rust-lang.org/std/future/trait.Fu Lets take a detailed look at how `Discv4Service::poll` works under the hood. This function has many moving parts, so we will break it up into smaller sections. -[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/lib.rs#L1302) +[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/discv4/src/lib.rs#L495) ```rust ignore impl Discv4Service { //--snip-- @@ -259,7 +259,7 @@ impl Discv4Service { As the function starts, a `loop` is entered and the `Discv4Service.queued_events` are evaluated to see if there are any events ready to be processed. If there is an event ready, the function immediately returns the event wrapped in `Poll::Ready()`. The `queued_events` field is a `VecDeque` where `Discv4Event` is an enum containing one of the following variants. -[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/lib.rs#L1455) +[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/discv4/src/lib.rs#L1770) ```rust ignore pub enum Discv4Event { /// A `Ping` message was handled. @@ -285,7 +285,7 @@ Next, the Discv4Service handles all incoming `Discv4Command`s until there are no In Reth, once a new `NetworkState` is initialized as the node starts up and a new task is spawned to handle the network, the `poll()` function is used to advance the state of the network. 
-[File: crates/net/network/src/state.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/state.rs#L377) +[File: crates/net/network/src/state.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/network/src/state.rs#L396) ```rust ignore impl NetworkState where diff --git a/docs/crates/stages.md b/docs/crates/stages.md index 8e3de4a044a9..c7815b453b4e 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -90,7 +90,7 @@ pub struct SealedHeader { Each `SealedHeader` is then validated to ensure that it has the proper parent. Note that this is only a basic response validation, and the `HeaderDownloader` uses the `validate` method during the `stream`, so that each header is validated according to the consensus specification before the header is yielded from the stream. After this, each header is then written to the database. If a header is not valid or the stream encounters any other error, the error is propagated up through the stage execution, the changes to the database are unwound and the stage is resumed from the most recent valid state. -This process continues until all of the headers have been downloaded and written to the database. Finally, the total difficulty of the chain's head is updated and the function returns `Ok(ExecOutput { stage_progress, done: true })`, signaling that the header sync has completed successfully. +This process continues until all of the headers have been downloaded and written to the database. Finally, the total difficulty of the chain's head is updated and the function returns `Ok(ExecOutput { stage_progress, done: true })`, signaling that the header sync has been completed successfully.
diff --git a/docs/design/review.md b/docs/design/review.md index 329d7b2d4764..693c991a777f 100644 --- a/docs/design/review.md +++ b/docs/design/review.md @@ -25,7 +25,7 @@ This document contains some of our research in how other codebases designed vari ## Header Downloaders * Erigon Header Downloader: - * A header downloader algo was introduced in [`erigon#1016`](https://github.com/ledgerwatch/erigon/pull/1016) and finished in [`erigon#1145`](https://github.com/ledgerwatch/erigon/pull/1145). At a high level, the downloader concurrently requested headers by hash, then sorted, validated and fused the responses into chain segments. Smaller segments were fused into larger as the gaps between them were filled. The downloader also used to maintain hardcoded hashes (later renamed to preverified) to bootstrap the sync. + * A header downloader algo was introduced in [`erigon#1016`](https://github.com/ledgerwatch/erigon/pull/1016) and finished in [`erigon#1145`](https://github.com/ledgerwatch/erigon/pull/1145). At a high level, the downloader concurrently requested headers by hash, then sorted, validated and fused the responses into chain segments. Smaller segments were fused into larger as the gaps between them were filled. The downloader is also used to maintain hardcoded hashes (later renamed to preverified) to bootstrap the sync. * The downloader was refactored multiple times: [`erigon#1471`](https://github.com/ledgerwatch/erigon/pull/1471), [`erigon#1559`](https://github.com/ledgerwatch/erigon/pull/1559) and [`erigon#2035`](https://github.com/ledgerwatch/erigon/pull/2035). * With PoS transition in [`erigon#3075`](https://github.com/ledgerwatch/erigon/pull/3075) terminal td was introduced to the algo to stop forward syncing. 
For the downward sync (post merge), the download was now delegated to [`EthBackendServer`](https://github.com/ledgerwatch/erigon/blob/3c95db00788dc740849c2207d886fe4db5a8c473/ethdb/privateapi/ethbackend.go#L245) * Proper reverse PoS downloader was introduced in [`erigon#3092`](https://github.com/ledgerwatch/erigon/pull/3092) which downloads the header batches from tip until local head is reached. Refactored later in [`erigon#3340`](https://github.com/ledgerwatch/erigon/pull/3340) and [`erigon#3717`](https://github.com/ledgerwatch/erigon/pull/3717). diff --git a/docs/repo/ci.md b/docs/repo/ci.md index 18356ddb7320..d69e12c5d3a5 100644 --- a/docs/repo/ci.md +++ b/docs/repo/ci.md @@ -4,7 +4,6 @@ The CI runs a couple of workflows: ### Code -- **[ci]**: A catch-all for small jobs. Currently only runs lints (rustfmt, clippy etc.) - **[unit]**: Runs unit tests (tests in `src/`) and doc tests - **[integration]**: Runs integration tests (tests in `tests/` and sync tests) - **[bench]**: Runs benchmarks @@ -16,14 +15,11 @@ The CI runs a couple of workflows: ### Meta - **[deny]**: Runs `cargo deny` to check for license conflicts and security advisories in our dependencies -- **[sanity]**: Runs a couple of sanity checks on the code every night, such as checking for unused dependencies - **[release]**: Runs the release workflow -[ci]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/ci.yml [unit]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/unit.yml [integration]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/integration.yml [bench]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/bench.yml [book]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/book.yml [deny]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/deny.yml -[sanity]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/sanity.yml [release]: 
https://github.com/paradigmxyz/reth/blob/main/.github/workflows/release.yml diff --git a/docs/repo/layout.md b/docs/repo/layout.md index c3c53321fac3..f7f0e93eab2e 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -56,7 +56,7 @@ The networking component mainly lives in [`net/network`](../../crates/net/networ #### Common -- [`net/common`](../../crates/net/common): Shared types used across multiple networking crates. +- [`net/banlist`](../../crates/net/banlist): A simple peer banlist that can be used to ban peers or IP addresses. - Contains: Peer banlist. - [`net/network-api`](../../crates/net/network-api): Contains traits that define the networking component as a whole. Other components that interface with the network stack only need to depend on this crate for the relevant types. - [`net/nat`](../../crates/net/nat): A small helper crate that resolves the external IP of the running node using various methods (such as a manually provided IP, using UPnP etc.) @@ -137,11 +137,9 @@ Crates related to building and validating payloads (blocks). ### Primitives -These crates define primitive types or algorithms such as RLP. +These crates define primitive types or algorithms. - [`primitives`](../../crates/primitives): Commonly used types in Reth. -- [`rlp`](../../crates/rlp): An implementation of RLP, forked from an earlier Apache-licensed version of [`fastrlp`][fastrlp] -- [`rlp/rlp-derive`](../../crates/rlp/rlp-derive): Forked from an earlier Apache licenced version of the [`fastrlp-derive`][fastrlp-derive] crate, before it changed licence to GPL. - [`trie`](../../crates/trie): An implementation of a Merkle Patricia Trie used for various roots (e.g. the state root) in Ethereum. ### Misc @@ -154,8 +152,6 @@ Small utility crates. 
- [`metrics/metrics-derive`](../../crates/metrics/metrics-derive): A derive-style API for creating metrics - [`tracing`](../../crates/tracing): A small utility crate to install a uniform [`tracing`][tracing] subscriber -[fastrlp]: https://crates.io/crates/fastrlp -[fastrlp-derive]: https://crates.io/crates/fastrlp-derive [libmdbx-rs]: https://crates.io/crates/libmdbx [discv4]: https://github.com/ethereum/devp2p/blob/master/discv4.md [jsonrpsee]: https://github.com/paritytech/jsonrpsee/ diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index c313bf283a34..6f02a2a56149 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -46,7 +46,7 @@ services: grafana: restart: unless-stopped - image: grafana/grafana:10.3.3 + image: grafana/grafana:latest depends_on: - reth - prometheus diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index e56c94b112c9..d9b3cdefd0e5 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -8177,6 +8177,529 @@ ], "title": "Number of ExExs", "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 298 + }, + "id": 226, + "panels": [], + "title": "Eth Requests", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 299 + }, + "id": 225, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_headers_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Headers Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Headers Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + 
"showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 299 + }, + "id": 227, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_receipts_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Receipts Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Receipts Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": 
"linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 306 + }, + "id": 235, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_bodies_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Bodies Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Bodies Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": 
false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 306 + }, + "id": 234, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_node_data_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Node Data Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Node Data Requests Received", + "type": "timeseries" } ], "refresh": "30s", diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index f93276e509a4..90fe5ba8d2d3 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -1458,7 +1458,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": 
"rate(reth_network_pool_transactions_messages_sent_total{instance=~\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_network_pool_transactions_messages_sent{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "instant": false, "legendFormat": "Tx", @@ -1471,7 +1471,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "rate(reth_network_pool_transactions_messages_received_total{instance=~\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_network_pool_transactions_messages_received{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Rx", "range": true, @@ -1483,7 +1483,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_network_pool_transactions_messages_sent_total{instance=~\"$instance\"} - reth_network_pool_transactions_messages_received_total{instance=~\"$instance\"}", + "expr": "reth_network_pool_transactions_messages_sent{instance=~\"$instance\"} - reth_network_pool_transactions_messages_received{instance=~\"$instance\"}", "hide": false, "legendFormat": "Messages in Channel", "range": true, @@ -1719,6 +1719,23 @@ "range": true, "refId": "C", "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blob_transactions_nonce_gaps{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Blob transactions nonce gaps", + "range": true, + "refId": "D", + "useBackend": false } ], "title": "All Transactions metrics", diff --git a/examples/README.md b/examples/README.md index 6605fd2972b3..b24b7387f32d 100644 --- a/examples/README.md +++ b/examples/README.md @@ -10,25 +10,27 @@ to make a PR! 
## Node Builder -| Example | Description | -| -------------------------------------------------- | ------------------------------------------------------------------------------------------------ | -| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | -| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | -| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | -| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | -| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | -| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | -| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | -| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | +| Example | Description | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | +| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | +| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | +| [Custom Stateful Precompile](./stateful-precompile) | Illustrates how to implement a node with a stateful precompile | +| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | +| 
[Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | +| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | +| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | ## ExEx -| Example | Description | -|-------------------------------------------|-----------------------------------------------------------------------------------| -| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | -| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | -| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | -| [In Memory State](./exex/in-memory-state) | Illustrates an ExEx that tracks the plain state in memory | +| Example | Description | +| ----------------------------------------- | --------------------------------------------------------------------------------------------------- | +| [In Memory State](./exex/in-memory-state) | Illustrates an ExEx that tracks the plain state in memory | +| [Minimal](./exex/minimal) | Illustrates how to build a simple ExEx | +| [OP Bridge](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | +| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | +| [Discv5 as ExEx](./exex/discv5) | Illustrates an ExEx that runs discv5 discovery stack | ## RPC @@ -57,11 +59,11 @@ to make a PR! 
## P2P -| Example | Description | -| --------------------------- | ----------------------------------------------------------------- | -| [Manual P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer | -| [Polygon P2P](./polygon-p2p) | Illustrates how to connect and communicate with a peer on Polygon | -| [BSC P2P](./bsc-p2p) | Illustrates how to connect and communicate with a peer on Binance Smart Chain | +| Example | Description | +| ---------------------------- | ----------------------------------------------------------------------------- | +| [Manual P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer | +| [Polygon P2P](./polygon-p2p) | Illustrates how to connect and communicate with a peer on Polygon | +| [BSC P2P](./bsc-p2p) | Illustrates how to connect and communicate with a peer on Binance Smart Chain | ## Misc diff --git a/examples/beacon-api-sidecar-fetcher/Cargo.toml b/examples/beacon-api-sidecar-fetcher/Cargo.toml index 3da9788262d3..80f5f726d96d 100644 --- a/examples/beacon-api-sidecar-fetcher/Cargo.toml +++ b/examples/beacon-api-sidecar-fetcher/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "beacon-api-sidecar-fetcher" +name = "example-beacon-api-sidecar-fetcher" version = "0.1.0" publish = false edition.workspace = true diff --git a/examples/beacon-api-sse/Cargo.toml b/examples/beacon-api-sse/Cargo.toml index 35bc4be0c304..8667ae7ab1b3 100644 --- a/examples/beacon-api-sse/Cargo.toml +++ b/examples/beacon-api-sse/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "beacon-api-sse" +name = "example-beacon-api-sse" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/bsc-p2p/Cargo.toml b/examples/bsc-p2p/Cargo.toml index 984130590cfd..dde02080d135 100644 --- a/examples/bsc-p2p/Cargo.toml +++ b/examples/bsc-p2p/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "bsc-p2p" +name = "example-bsc-p2p" version = "0.0.0" publish = false edition.workspace = true @@ -8,9 +8,11 @@ license.workspace = true 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +reth-chainspec.workspace = true reth-discv4 = { workspace = true, features = ["test-utils"] } reth-network = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true +reth-network-peers.workspace = true reth-primitives.workspace = true reth-tracing.workspace = true diff --git a/examples/bsc-p2p/src/chainspec.rs b/examples/bsc-p2p/src/chainspec.rs index 65169c734155..0c4cbe1ed961 100644 --- a/examples/bsc-p2p/src/chainspec.rs +++ b/examples/bsc-p2p/src/chainspec.rs @@ -1,8 +1,10 @@ -use reth_primitives::{ - b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, Hardfork, NodeRecord, B256, +use reth_chainspec::{ + BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, }; +use reth_network_peers::NodeRecord; +use reth_primitives::{b256, B256}; -use std::{collections::BTreeMap, sync::Arc}; +use std::sync::Arc; pub const SHANGHAI_TIME: u64 = 1705996800; @@ -14,9 +16,12 @@ pub(crate) fn bsc_chain_spec() -> Arc { genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"), genesis_hash: Some(GENESIS), paris_block_and_final_difficulty: None, - hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Timestamp(SHANGHAI_TIME))]), + hardforks: ChainHardforks::new(vec![( + EthereumHardfork::Shanghai.boxed(), + ForkCondition::Timestamp(SHANGHAI_TIME), + )]), deposit_contract: None, - base_fee_params: reth_primitives::BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), + base_fee_params: reth_chainspec::BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), prune_delete_limit: 0, } .into() diff --git a/examples/custom-dev-node/Cargo.toml b/examples/custom-dev-node/Cargo.toml index 3cd624cea1fc..d40c97ca658d 100644 --- a/examples/custom-dev-node/Cargo.toml +++ b/examples/custom-dev-node/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-dev-node" +name = 
"example-custom-dev-node" version = "0.0.0" publish = false edition.workspace = true @@ -8,9 +8,10 @@ license.workspace = true [dependencies] reth.workspace = true +reth-chainspec.workspace = true reth-node-core.workspace = true reth-primitives.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["test-utils"] } futures-util.workspace = true eyre.workspace = true diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs index 4788e02b8b15..176e4c503827 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -3,17 +3,19 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use std::sync::Arc; + use futures_util::StreamExt; use reth::{ builder::{NodeBuilder, NodeHandle}, providers::CanonStateSubscriptions, - rpc::eth::EthTransactions, + rpc::api::eth::helpers::EthTransactions, tasks::TaskManager, }; +use reth_chainspec::ChainSpec; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; -use reth_primitives::{b256, hex, ChainSpec, Genesis}; -use std::sync::Arc; +use reth_primitives::{b256, hex, Genesis}; #[tokio::main] async fn main() -> eyre::Result<()> { @@ -25,7 +27,7 @@ async fn main() -> eyre::Result<()> { .with_rpc(RpcServerArgs::default().with_http()) .with_chain(custom_chain()); - let NodeHandle { mut node, node_exit_future: _ } = NodeBuilder::new(node_config) + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) .testing_node(tasks.executor()) .node(EthereumNode::default()) .launch() diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index 7386313068a0..c00863147f9b 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-engine-types" +name = "example-custom-engine-types" version = "0.0.0" publish = false edition.workspace = 
true @@ -7,6 +7,7 @@ license.workspace = true [dependencies] reth.workspace = true +reth-chainspec.workspace = true reth-rpc-types.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true @@ -14,8 +15,9 @@ reth-primitives.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true +alloy-genesis.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 04b46932d2ab..298e6d06a33a 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -22,7 +22,9 @@ use std::convert::Infallible; use serde::{Deserialize, Serialize}; use thiserror::Error; +use alloy_genesis::Genesis; use reth::{ + api::PayloadTypes, builder::{ components::{ComponentsBuilder, PayloadServiceBuilder}, node::NodeTypes, @@ -37,19 +39,20 @@ use reth_basic_payload_builder::{ BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig, BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig, }; +use reth_chainspec::{Chain, ChainSpec}; use reth_node_api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, validate_version_specific_fields, EngineTypes, PayloadAttributes, PayloadBuilderAttributes, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::node::{ - EthereumExecutorBuilder, EthereumNetworkBuilder, EthereumPoolBuilder, + EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, EthereumPoolBuilder, }; use reth_payload_builder::{ error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderHandle, PayloadBuilderService, }; -use reth_primitives::{Address, Chain, ChainSpec, Genesis, Header, 
Withdrawals, B256}; +use reth_primitives::{Address, Header, Withdrawals, B256}; use reth_rpc_types::{ engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, @@ -162,10 +165,13 @@ impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes { #[non_exhaustive] pub struct CustomEngineTypes; -impl EngineTypes for CustomEngineTypes { +impl PayloadTypes for CustomEngineTypes { + type BuiltPayload = EthBuiltPayload; type PayloadAttributes = CustomPayloadAttributes; type PayloadBuilderAttributes = CustomPayloadBuilderAttributes; - type BuiltPayload = EthBuiltPayload; +} + +impl EngineTypes for CustomEngineTypes { type ExecutionPayloadV1 = ExecutionPayloadV1; type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; @@ -204,6 +210,7 @@ where CustomPayloadServiceBuilder, EthereumNetworkBuilder, EthereumExecutorBuilder, + EthereumConsensusBuilder, >; fn components_builder(self) -> Self::ComponentsBuilder { @@ -213,6 +220,7 @@ where .payload(CustomPayloadServiceBuilder::default()) .network(EthereumNetworkBuilder::default()) .executor(EthereumExecutorBuilder::default()) + .consensus(EthereumConsensusBuilder::default()) } } diff --git a/examples/custom-evm/Cargo.toml b/examples/custom-evm/Cargo.toml index d1b5221ed567..7642dc80cf2f 100644 --- a/examples/custom-evm/Cargo.toml +++ b/examples/custom-evm/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-evm" +name = "example-custom-evm" version = "0.0.0" publish = false edition.workspace = true @@ -7,11 +7,14 @@ license.workspace = true [dependencies] reth.workspace = true +reth-chainspec.workspace = true +reth-evm-ethereum.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-primitives.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true +alloy-genesis.workspace = true eyre.workspace = true tokio.workspace 
= true diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index f44903f1a127..207640dce9c9 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -2,25 +2,31 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_genesis::Genesis; use reth::{ builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, primitives::{ address, - revm_primitives::{CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, - Address, Bytes, U256, + revm_primitives::{Env, PrecompileResult}, + Bytes, }, revm::{ handler::register::EvmHandler, inspector_handle_register, - precompile::{Precompile, PrecompileSpecId, Precompiles}, - Database, Evm, EvmBuilder, GetInspector, + precompile::{Precompile, PrecompileOutput, PrecompileSpecId}, + ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, }, tasks::TaskManager, }; +use reth_chainspec::{Chain, ChainSpec, Head}; +use reth_evm_ethereum::EthEvmConfig; use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; -use reth_primitives::{Chain, ChainSpec, Genesis, Header, TransactionSigned}; +use reth_node_ethereum::{EthExecutorProvider, EthereumNode}; +use reth_primitives::{ + revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + Address, Header, TransactionSigned, U256, +}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -45,33 +51,58 @@ impl MyEvmConfig { // install the precompiles handler.pre_execution.load_precompiles = Arc::new(move || { - let mut precompiles = Precompiles::new(PrecompileSpecId::from_spec_id(spec_id)).clone(); - precompiles.inner.insert( + let mut precompiles = ContextPrecompiles::new(PrecompileSpecId::from_spec_id(spec_id)); + precompiles.extend([( address!("0000000000000000000000000000000000000999"), - Precompile::Env(Self::my_precompile), - ); - precompiles.into() + 
Precompile::Env(Self::my_precompile).into(), + )]); + precompiles }); } /// A custom precompile that does nothing fn my_precompile(_data: &Bytes, _gas: u64, _env: &Env) -> PrecompileResult { - Ok((0, Bytes::new())) + Ok(PrecompileOutput::new(0, Bytes::new())) } } impl ConfigureEvmEnv for MyEvmConfig { - fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { - EthEvmConfig::fill_tx_env(tx_env, transaction, sender) - } - fn fill_cfg_env( + &self, cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ) { - EthEvmConfig::fill_cfg_env(cfg_env, chain_spec, header, total_difficulty) + let spec_id = reth_evm_ethereum::revm_spec( + chain_spec, + &Head { + number: header.number, + timestamp: header.timestamp, + difficulty: header.difficulty, + total_difficulty, + hash: Default::default(), + }, + ); + + cfg_env.chain_id = chain_spec.chain().id(); + cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; + + cfg_env.handler_cfg.spec_id = spec_id; + } + + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + EthEvmConfig::default().fill_tx_env(tx_env, transaction, sender) + } + + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ) { + EthEvmConfig::default().fill_tx_env_system_contract_call(env, caller, contract, data) } } diff --git a/examples/custom-inspector/Cargo.toml b/examples/custom-inspector/Cargo.toml index d2dc4ba14ad1..443cf57b3166 100644 --- a/examples/custom-inspector/Cargo.toml +++ b/examples/custom-inspector/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-inspector" +name = "example-custom-inspector" version = "0.0.0" publish = false edition.workspace = true @@ -8,5 +8,6 @@ license.workspace = true [dependencies] reth.workspace = true reth-node-ethereum.workspace = true +reth-rpc-types.workspace = true clap = { workspace = true, features = ["derive"] } 
-futures-util.workspace = true \ No newline at end of file +futures-util.workspace = true diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index b0fe4fbb8c2e..b6721eded67c 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -21,26 +21,24 @@ use reth::{ interpreter::{Interpreter, OpCode}, Database, Evm, EvmContext, Inspector, }, - rpc::{ - compat::transaction::transaction_to_call_request, - eth::{revm_utils::EvmOverrides, EthTransactions}, - }, + rpc::{api::eth::helpers::Call, compat::transaction::transaction_to_call_request}, transaction_pool::TransactionPool, }; use reth_node_ethereum::node::EthereumNode; +use reth_rpc_types::state::EvmOverrides; fn main() { Cli::::parse() .run(|builder, args| async move { // launch the node - let NodeHandle { mut node, node_exit_future } = + let NodeHandle { node, node_exit_future } = builder.node(EthereumNode::default()).launch().await?; // create a new subscription to pending transactions let mut pending_transactions = node.pool.new_pending_pool_transactions_listener(); // get an instance of the `trace_` API handler - let eth_api = node.rpc_registry.eth_api(); + let eth_api = node.rpc_registry.eth_api().clone(); println!("Spawning trace task!"); diff --git a/examples/custom-node-components/Cargo.toml b/examples/custom-node-components/Cargo.toml index 761eeaf1b60f..467914102112 100644 --- a/examples/custom-node-components/Cargo.toml +++ b/examples/custom-node-components/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-node-components" +name = "example-custom-node-components" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 19bc9777bea8..842627797ef4 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -39,7 +39,7 @@ pub struct CustomPoolBuilder { pool_config: 
PoolConfig, } -/// Implement the `PoolBuilder` trait for the custom pool builder +/// Implement the [`PoolBuilder`] trait for the custom pool builder /// /// This will be used to build the transaction pool and its maintenance tasks during launch. impl PoolBuilder for CustomPoolBuilder diff --git a/examples/custom-payload-builder/Cargo.toml b/examples/custom-payload-builder/Cargo.toml index 58fa775271c6..f10bd8058b64 100644 --- a/examples/custom-payload-builder/Cargo.toml +++ b/examples/custom-payload-builder/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-payload-builder" +name = "example-custom-payload-builder" version = "0.0.0" publish = false edition.workspace = true @@ -7,6 +7,7 @@ license.workspace = true [dependencies] reth.workspace = true +reth-chainspec.workspace = true reth-primitives.workspace = true reth-node-api.workspace = true reth-basic-payload-builder.workspace = true @@ -16,4 +17,4 @@ reth-ethereum-payload-builder.workspace = true tracing.workspace = true futures-util.workspace = true -eyre.workspace = true \ No newline at end of file +eyre.workspace = true diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index 288b20de43c0..a220315cb427 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -5,9 +5,10 @@ use reth::{ transaction_pool::TransactionPool, }; use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; +use reth_chainspec::ChainSpec; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{error::PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::{BlockNumberOrTag, Bytes, ChainSpec}; +use reth_primitives::{BlockNumberOrTag, Bytes}; use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. 
diff --git a/examples/custom-rlpx-subprotocol/Cargo.toml b/examples/custom-rlpx-subprotocol/Cargo.toml new file mode 100644 index 000000000000..d2d1caab6355 --- /dev/null +++ b/examples/custom-rlpx-subprotocol/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "example-custom-rlpx-subprotocol" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + + +[dependencies] +tokio = { workspace = true, features = ["full"] } +futures.workspace = true +reth-eth-wire.workspace = true +reth-network.workspace = true +reth-network-api.workspace = true +reth-node-ethereum.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-primitives.workspace = true +reth-rpc-types.workspace = true +reth.workspace = true +tokio-stream.workspace = true +eyre.workspace = true +rand.workspace = true +tracing.workspace = true diff --git a/examples/custom-rlpx-subprotocol/src/main.rs b/examples/custom-rlpx-subprotocol/src/main.rs new file mode 100644 index 000000000000..3a198c38d285 --- /dev/null +++ b/examples/custom-rlpx-subprotocol/src/main.rs @@ -0,0 +1,104 @@ +//! Example for how to customize the network layer by adding a custom rlpx subprotocol. +//! +//! Run with +//! +//! ```not_rust +//! cargo run -p example-custom-rlpx-subprotocol -- node +//! ``` +//! +//! This launch a regular reth node with a custom rlpx subprotocol. 
+use reth::builder::NodeHandle; +use reth_network::{ + config::SecretKey, protocol::IntoRlpxSubProtocol, NetworkConfig, NetworkManager, + NetworkProtocols, +}; +use reth_network_api::NetworkInfo; +use reth_node_ethereum::EthereumNode; +use reth_provider::test_utils::NoopProvider; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use subprotocol::{ + connection::CustomCommand, + protocol::{ + event::ProtocolEvent, + handler::{CustomRlpxProtoHandler, ProtocolState}, + }, +}; +use tokio::sync::{mpsc, oneshot}; +use tracing::info; + +mod subprotocol; + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _args| async move { + // launch the node + let NodeHandle { node, node_exit_future } = + builder.node(EthereumNode::default()).launch().await?; + let peer_id = node.network.peer_id(); + let peer_addr = node.network.local_addr(); + + // add the custom network subprotocol to the launched node + let (tx, mut from_peer0) = mpsc::unbounded_channel(); + let custom_rlpx_handler = CustomRlpxProtoHandler { state: ProtocolState { events: tx } }; + node.network.add_rlpx_sub_protocol(custom_rlpx_handler.into_rlpx_sub_protocol()); + + // creates a separate network instance and adds the custom network subprotocol + let secret_key = SecretKey::new(&mut rand::thread_rng()); + let (tx, mut from_peer1) = mpsc::unbounded_channel(); + let custom_rlpx_handler_2 = CustomRlpxProtoHandler { state: ProtocolState { events: tx } }; + let net_cfg = NetworkConfig::builder(secret_key) + .listener_addr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0))) + .disable_discovery() + .add_rlpx_sub_protocol(custom_rlpx_handler_2.into_rlpx_sub_protocol()) + .build(NoopProvider::default()); + + // spawn the second network instance + let subnetwork = NetworkManager::new(net_cfg).await?; + let subnetwork_peer_id = *subnetwork.peer_id(); + let subnetwork_peer_addr = subnetwork.local_addr(); + let subnetwork_handle = subnetwork.peers_handle(); + 
node.task_executor.spawn(subnetwork); + + // connect the launched node to the subnetwork + node.network.peers_handle().add_peer(subnetwork_peer_id, subnetwork_peer_addr); + + // connect the subnetwork to the launched node + subnetwork_handle.add_peer(*peer_id, peer_addr); + + // establish connection between peer0 and peer1 + let peer0_to_peer1 = from_peer0.recv().await.expect("peer0 connecting to peer1"); + let peer0_conn = match peer0_to_peer1 { + ProtocolEvent::Established { direction: _, peer_id, to_connection } => { + assert_eq!(peer_id, subnetwork_peer_id); + to_connection + } + }; + + // establish connection between peer1 and peer0 + let peer1_to_peer0 = from_peer1.recv().await.expect("peer1 connecting to peer0"); + let peer1_conn = match peer1_to_peer0 { + ProtocolEvent::Established { direction: _, peer_id: peer1_id, to_connection } => { + assert_eq!(peer1_id, *peer_id); + to_connection + } + }; + info!(target:"rlpx-subprotocol", "Connection established!"); + + // send a ping message from peer0 to peer1 + let (tx, rx) = oneshot::channel(); + peer0_conn.send(CustomCommand::Message { msg: "hello!".to_string(), response: tx })?; + let response = rx.await?; + assert_eq!(response, "hello!"); + info!(target:"rlpx-subprotocol", ?response, "New message received"); + + // send a ping message from peer1 to peer0 + let (tx, rx) = oneshot::channel(); + peer1_conn.send(CustomCommand::Message { msg: "world!".to_string(), response: tx })?; + let response = rx.await?; + assert_eq!(response, "world!"); + info!(target:"rlpx-subprotocol", ?response, "New message received"); + + info!(target:"rlpx-subprotocol", "Peers connected via custom rlpx subprotocol!"); + + node_exit_future.await + }) +} diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/connection/handler.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/handler.rs new file mode 100644 index 000000000000..dae2d5c8679e --- /dev/null +++ 
b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/handler.rs @@ -0,0 +1,53 @@ +use super::CustomRlpxConnection; +use crate::subprotocol::protocol::{ + event::ProtocolEvent, handler::ProtocolState, proto::CustomRlpxProtoMessage, +}; +use reth_eth_wire::{ + capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol, +}; +use reth_network::protocol::{ConnectionHandler, OnNotSupported}; +use reth_network_api::Direction; +use reth_rpc_types::PeerId; +use tokio::sync::mpsc; +use tokio_stream::wrappers::UnboundedReceiverStream; + +/// The connection handler for the custom RLPx protocol. +pub(crate) struct CustomRlpxConnectionHandler { + pub(crate) state: ProtocolState, +} + +impl ConnectionHandler for CustomRlpxConnectionHandler { + type Connection = CustomRlpxConnection; + + fn protocol(&self) -> Protocol { + CustomRlpxProtoMessage::protocol() + } + + fn on_unsupported_by_peer( + self, + _supported: &SharedCapabilities, + _direction: Direction, + _peer_id: PeerId, + ) -> OnNotSupported { + OnNotSupported::KeepAlive + } + + fn into_connection( + self, + direction: Direction, + peer_id: PeerId, + conn: ProtocolConnection, + ) -> Self::Connection { + let (tx, rx) = mpsc::unbounded_channel(); + self.state + .events + .send(ProtocolEvent::Established { direction, peer_id, to_connection: tx }) + .ok(); + CustomRlpxConnection { + conn, + initial_ping: direction.is_outgoing().then(CustomRlpxProtoMessage::ping), + commands: UnboundedReceiverStream::new(rx), + pending_pong: None, + } + } +} diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs new file mode 100644 index 000000000000..a6d835b70c26 --- /dev/null +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs @@ -0,0 +1,76 @@ +use super::protocol::proto::{CustomRlpxProtoMessage, CustomRlpxProtoMessageKind}; +use futures::{Stream, StreamExt}; +use 
reth_eth_wire::multiplex::ProtocolConnection; +use reth_primitives::BytesMut; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; +use tokio::sync::oneshot; +use tokio_stream::wrappers::UnboundedReceiverStream; + +pub(crate) mod handler; + +/// We define some custom commands that the subprotocol supports. +pub(crate) enum CustomCommand { + /// Sends a message to the peer + Message { + msg: String, + /// The response will be sent to this channel. + response: oneshot::Sender, + }, +} + +/// The connection handler for the custom RLPx protocol. +pub(crate) struct CustomRlpxConnection { + conn: ProtocolConnection, + initial_ping: Option, + commands: UnboundedReceiverStream, + pending_pong: Option>, +} + +impl Stream for CustomRlpxConnection { + type Item = BytesMut; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + if let Some(initial_ping) = this.initial_ping.take() { + return Poll::Ready(Some(initial_ping.encoded())) + } + + loop { + if let Poll::Ready(Some(cmd)) = this.commands.poll_next_unpin(cx) { + return match cmd { + CustomCommand::Message { msg, response } => { + this.pending_pong = Some(response); + Poll::Ready(Some(CustomRlpxProtoMessage::ping_message(msg).encoded())) + } + } + } + + let Some(msg) = ready!(this.conn.poll_next_unpin(cx)) else { return Poll::Ready(None) }; + + let Some(msg) = CustomRlpxProtoMessage::decode_message(&mut &msg[..]) else { + return Poll::Ready(None) + }; + + match msg.message { + CustomRlpxProtoMessageKind::Ping => { + return Poll::Ready(Some(CustomRlpxProtoMessage::pong().encoded())) + } + CustomRlpxProtoMessageKind::Pong => {} + CustomRlpxProtoMessageKind::PingMessage(msg) => { + return Poll::Ready(Some(CustomRlpxProtoMessage::pong_message(msg).encoded())) + } + CustomRlpxProtoMessageKind::PongMessage(msg) => { + if let Some(sender) = this.pending_pong.take() { + sender.send(msg).ok(); + } + continue + } + } + + return Poll::Pending + } + } +} diff --git 
a/examples/custom-rlpx-subprotocol/src/subprotocol/mod.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/mod.rs new file mode 100644 index 000000000000..53ec0dc1d4e7 --- /dev/null +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/mod.rs @@ -0,0 +1,2 @@ +pub(crate) mod connection; +pub(crate) mod protocol; diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/event.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/event.rs new file mode 100644 index 000000000000..ea9e588e592b --- /dev/null +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/event.rs @@ -0,0 +1,15 @@ +use crate::subprotocol::connection::CustomCommand; +use reth_network::Direction; +use reth_network_api::PeerId; +use tokio::sync::mpsc; + +/// The events that can be emitted by our custom protocol. +#[derive(Debug)] +pub(crate) enum ProtocolEvent { + Established { + #[allow(dead_code)] + direction: Direction, + peer_id: PeerId, + to_connection: mpsc::UnboundedSender, + }, +} diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/handler.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/handler.rs new file mode 100644 index 000000000000..d5a35398dae1 --- /dev/null +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/handler.rs @@ -0,0 +1,34 @@ +use super::event::ProtocolEvent; +use crate::subprotocol::connection::handler::CustomRlpxConnectionHandler; +use reth_network::protocol::ProtocolHandler; +use reth_network_api::PeerId; +use std::net::SocketAddr; +use tokio::sync::mpsc; + +/// Protocol state is an helper struct to store the protocol events. +#[derive(Clone, Debug)] +pub(crate) struct ProtocolState { + pub(crate) events: mpsc::UnboundedSender, +} + +/// The protocol handler takes care of incoming and outgoing connections. 
+#[derive(Debug)] +pub(crate) struct CustomRlpxProtoHandler { + pub state: ProtocolState, +} + +impl ProtocolHandler for CustomRlpxProtoHandler { + type ConnectionHandler = CustomRlpxConnectionHandler; + + fn on_incoming(&self, _socket_addr: SocketAddr) -> Option { + Some(CustomRlpxConnectionHandler { state: self.state.clone() }) + } + + fn on_outgoing( + &self, + _socket_addr: SocketAddr, + _peer_id: PeerId, + ) -> Option { + Some(CustomRlpxConnectionHandler { state: self.state.clone() }) + } +} diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/mod.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/mod.rs new file mode 100644 index 000000000000..8aba9a4e3506 --- /dev/null +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod event; +pub(crate) mod handler; +pub(crate) mod proto; diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs new file mode 100644 index 000000000000..8b179a447d9f --- /dev/null +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs @@ -0,0 +1,113 @@ +//! Simple RLPx Ping Pong protocol that also support sending messages, +//! 
following [RLPx specs](https://github.com/ethereum/devp2p/blob/master/rlpx.md) + +use reth_eth_wire::{protocol::Protocol, Capability}; +use reth_primitives::{Buf, BufMut, BytesMut}; + +#[repr(u8)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) enum CustomRlpxProtoMessageId { + Ping = 0x00, + Pong = 0x01, + PingMessage = 0x02, + PongMessage = 0x03, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) enum CustomRlpxProtoMessageKind { + Ping, + Pong, + PingMessage(String), + PongMessage(String), +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct CustomRlpxProtoMessage { + pub message_type: CustomRlpxProtoMessageId, + pub message: CustomRlpxProtoMessageKind, +} + +impl CustomRlpxProtoMessage { + /// Returns the capability for the `custom_rlpx` protocol. + pub fn capability() -> Capability { + Capability::new_static("custom_rlpx", 1) + } + + /// Returns the protocol for the `custom_rlpx` protocol. + pub fn protocol() -> Protocol { + Protocol::new(Self::capability(), 4) + } + + /// Creates a ping message + pub fn ping_message(msg: impl Into) -> Self { + Self { + message_type: CustomRlpxProtoMessageId::PingMessage, + message: CustomRlpxProtoMessageKind::PingMessage(msg.into()), + } + } + /// Creates a ping message + pub fn pong_message(msg: impl Into) -> Self { + Self { + message_type: CustomRlpxProtoMessageId::PongMessage, + message: CustomRlpxProtoMessageKind::PongMessage(msg.into()), + } + } + + /// Creates a ping message + pub fn ping() -> Self { + Self { + message_type: CustomRlpxProtoMessageId::Ping, + message: CustomRlpxProtoMessageKind::Ping, + } + } + + /// Creates a pong message + pub fn pong() -> Self { + Self { + message_type: CustomRlpxProtoMessageId::Pong, + message: CustomRlpxProtoMessageKind::Pong, + } + } + + /// Creates a new `CustomRlpxProtoMessage` with the given message ID and payload. 
+ pub fn encoded(&self) -> BytesMut { + let mut buf = BytesMut::new(); + buf.put_u8(self.message_type as u8); + match &self.message { + CustomRlpxProtoMessageKind::Ping | CustomRlpxProtoMessageKind::Pong => {} + CustomRlpxProtoMessageKind::PingMessage(msg) | + CustomRlpxProtoMessageKind::PongMessage(msg) => { + buf.put(msg.as_bytes()); + } + } + buf + } + + /// Decodes a `CustomRlpxProtoMessage` from the given message buffer. + pub fn decode_message(buf: &mut &[u8]) -> Option { + if buf.is_empty() { + return None; + } + let id = buf[0]; + buf.advance(1); + let message_type = match id { + 0x00 => CustomRlpxProtoMessageId::Ping, + 0x01 => CustomRlpxProtoMessageId::Pong, + 0x02 => CustomRlpxProtoMessageId::PingMessage, + 0x03 => CustomRlpxProtoMessageId::PongMessage, + _ => return None, + }; + let message = match message_type { + CustomRlpxProtoMessageId::Ping => CustomRlpxProtoMessageKind::Ping, + CustomRlpxProtoMessageId::Pong => CustomRlpxProtoMessageKind::Pong, + CustomRlpxProtoMessageId::PingMessage => CustomRlpxProtoMessageKind::PingMessage( + String::from_utf8_lossy(&buf[..]).into_owned(), + ), + CustomRlpxProtoMessageId::PongMessage => CustomRlpxProtoMessageKind::PongMessage( + String::from_utf8_lossy(&buf[..]).into_owned(), + ), + }; + + Some(Self { message_type, message }) + } +} diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml index e447493c2783..692a1175ded1 100644 --- a/examples/db-access/Cargo.toml +++ b/examples/db-access/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "db-access" +name = "example-db-access" version = "0.0.0" publish = false edition.workspace = true @@ -7,6 +7,7 @@ license.workspace = true [dependencies] +reth-chainspec.workspace = true reth-db.workspace = true reth-primitives.workspace = true reth-provider.workspace = true diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index c43aec47ce0c..27047fd3f8ec 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ 
-1,5 +1,6 @@ +use reth_chainspec::ChainSpecBuilder; use reth_db::open_db_read_only; -use reth_primitives::{Address, ChainSpecBuilder, B256}; +use reth_primitives::{Address, B256}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, diff --git a/examples/exex/discv5/Cargo.toml b/examples/exex/discv5/Cargo.toml new file mode 100644 index 000000000000..b1777cfa1516 --- /dev/null +++ b/examples/exex/discv5/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "example-exex-discv5" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +discv5.workspace = true +enr.workspace = true + +reth-discv5.workspace = true +reth.workspace = true +reth-exex.workspace = true +reth-node-api.workspace = true +reth-node-ethereum.workspace = true +reth-network-peers.workspace = true +reth-tracing.workspace = true +futures.workspace = true + +clap.workspace = true +reth-chainspec.workspace = true +serde_json.workspace = true +tokio.workspace = true +tokio-stream.workspace = true +futures-util.workspace = true + +tracing.workspace = true +eyre.workspace = true + +[dev-dependencies] +reth-exex-test-utils.workspace = true +reth-testing-utils.workspace = true diff --git a/examples/exex/discv5/src/exex/mod.rs b/examples/exex/discv5/src/exex/mod.rs new file mode 100644 index 000000000000..4631f392979c --- /dev/null +++ b/examples/exex/discv5/src/exex/mod.rs @@ -0,0 +1,70 @@ +use eyre::Result; +use futures::{Future, FutureExt}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_api::FullNodeComponents; +use reth_tracing::tracing::info; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; +use tracing::error; + +use crate::network::DiscV5ExEx; + +/// The ExEx struct, representing the initialization and execution of the ExEx. 
+pub struct ExEx { + exex: ExExContext, + disc_v5: DiscV5ExEx, +} + +impl ExEx { + pub fn new(exex: ExExContext, disc_v5: DiscV5ExEx) -> Self { + Self { exex, disc_v5 } + } +} + +impl Future for ExEx { + type Output = Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + // Poll the Discv5 future until its drained + loop { + match self.disc_v5.poll_unpin(cx) { + Poll::Ready(Ok(())) => { + info!("Discv5 task completed successfully"); + } + Poll::Ready(Err(e)) => { + error!(?e, "Discv5 task encountered an error"); + return Poll::Ready(Err(e)); + } + Poll::Pending => { + // Exit match and continue to poll notifications + break; + } + } + } + + // Continuously poll the ExExContext notifications + loop { + if let Some(notification) = ready!(self.exex.notifications.poll_recv(cx)) { + match ¬ification { + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); + } + ExExNotification::ChainReorged { old, new } => { + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + info!(reverted_chain = ?old.range(), "Received revert"); + } + } + + if let Some(committed_chain) = notification.committed_chain() { + self.exex + .events + .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + } + } + } +} diff --git a/examples/exex/discv5/src/main.rs b/examples/exex/discv5/src/main.rs new file mode 100644 index 000000000000..2374326050b7 --- /dev/null +++ b/examples/exex/discv5/src/main.rs @@ -0,0 +1,29 @@ +use clap::Parser; + +use exex::ExEx; +use network::{cli_ext::Discv5ArgsExt, DiscV5ExEx}; +use reth_node_ethereum::EthereumNode; + +mod exex; +mod network; + +fn main() -> eyre::Result<()> { + reth::cli::Cli::::parse().run(|builder, args| async move { + let tcp_port = args.tcp_port; + let udp_port = args.udp_port; + + let handle = builder + .node(EthereumNode::default()) + .install_exex("exex-discv5", move |ctx| 
async move { + // start Discv5 task + let disc_v5 = DiscV5ExEx::new(tcp_port, udp_port).await?; + + // start exex task with discv5 + Ok(ExEx::new(ctx, disc_v5)) + }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} diff --git a/examples/exex/discv5/src/network/cli_ext.rs b/examples/exex/discv5/src/network/cli_ext.rs new file mode 100644 index 000000000000..1eb864de3611 --- /dev/null +++ b/examples/exex/discv5/src/network/cli_ext.rs @@ -0,0 +1,15 @@ +use clap::Args; + +pub const DEFAULT_DISCOVERY_PORT: u16 = 30304; +pub const DEFAULT_RLPX_PORT: u16 = 30303; + +#[derive(Debug, Clone, Args)] +pub(crate) struct Discv5ArgsExt { + /// TCP port used by RLPx + #[clap(long = "exex-discv5.tcp-port", default_value_t = DEFAULT_RLPX_PORT)] + pub tcp_port: u16, + + /// UDP port used for discovery + #[clap(long = "exex-discv5.udp-port", default_value_t = DEFAULT_DISCOVERY_PORT)] + pub udp_port: u16, +} diff --git a/examples/exex/discv5/src/network/mod.rs b/examples/exex/discv5/src/network/mod.rs new file mode 100644 index 000000000000..ebab28342d88 --- /dev/null +++ b/examples/exex/discv5/src/network/mod.rs @@ -0,0 +1,123 @@ +#![allow(dead_code)] + +use discv5::{enr::secp256k1::rand, Enr, Event, ListenConfig}; +use reth::network::config::SecretKey; +use reth_discv5::{enr::EnrCombinedKeyWrapper, Config, Discv5}; +use reth_network_peers::NodeRecord; +use reth_tracing::tracing::info; +use std::{ + future::Future, + net::SocketAddr, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tokio::sync::mpsc; + +pub(crate) mod cli_ext; + +/// Helper struct to manage a discovery node using discv5. +pub(crate) struct DiscV5ExEx { + /// The inner discv5 instance. + inner: Discv5, + /// The node record of the discv5 instance. + node_record: NodeRecord, + /// The events stream of the discv5 instance. + events: mpsc::Receiver, +} + +impl DiscV5ExEx { + /// Starts a new discv5 node. 
+ pub async fn new(udp_port: u16, tcp_port: u16) -> eyre::Result { + let secret_key = SecretKey::new(&mut rand::thread_rng()); + + let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port}").parse()?; + let rlpx_addr: SocketAddr = format!("127.0.0.1:{tcp_port}").parse()?; + + let discv5_listen_config = ListenConfig::from(discv5_addr); + let discv5_config = Config::builder(rlpx_addr) + .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) + .build(); + + let (discv5, events, node_record) = Discv5::start(&secret_key, discv5_config).await?; + Ok(Self { inner: discv5, events, node_record }) + } + + /// Adds a node to the table if its not already present. + pub fn add_node(&mut self, enr: Enr) -> eyre::Result<()> { + let reth_enr: enr::Enr = EnrCombinedKeyWrapper(enr.clone()).into(); + self.inner.add_node(reth_enr)?; + Ok(()) + } + + /// Returns the local ENR of the discv5 node. + pub fn local_enr(&self) -> Enr { + self.inner.with_discv5(|discv5| discv5.local_enr()) + } +} + +impl Future for DiscV5ExEx { + type Output = eyre::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.as_mut(); + loop { + match ready!(this.events.poll_recv(cx)) { + Some(evt) => { + if let Event::SessionEstablished(enr, socket_addr) = evt { + info!(?enr, ?socket_addr, "Session established with a new peer."); + } + } + None => return Poll::Ready(Ok(())), + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::network::DiscV5ExEx; + use tracing::info; + + #[tokio::test] + async fn can_establish_discv5_session_with_peer() { + reth_tracing::init_test_tracing(); + let mut node_1 = DiscV5ExEx::new(30301, 30303).await.unwrap(); + let node_1_enr = node_1.local_enr(); + + let mut node_2 = DiscV5ExEx::new(30302, 30303).await.unwrap(); + + let node_2_enr = node_2.local_enr(); + + info!(?node_1_enr, ?node_2_enr, "Started discovery nodes."); + + // add node_2 to node_1 table + node_1.add_node(node_2_enr.clone()).unwrap(); + + // verify 
node_2 is in node_1 table + assert!(node_1 + .inner + .with_discv5(|discv5| discv5.table_entries_id().contains(&node_2_enr.node_id()))); + + // send ping from node_1 to node_2 + node_1.inner.with_discv5(|discv5| discv5.send_ping(node_2_enr.clone())).await.unwrap(); + + // verify they both established a session + let event_2_v5 = node_2.events.recv().await.unwrap(); + let event_1_v5 = node_1.events.recv().await.unwrap(); + assert!(matches!( + event_1_v5, + discv5::Event::SessionEstablished(node, socket) if node == node_2_enr && socket == node_2_enr.udp4_socket().unwrap().into() + )); + assert!(matches!( + event_2_v5, + discv5::Event::SessionEstablished(node, socket) if node == node_1_enr && socket == node_1_enr.udp4_socket().unwrap().into() + )); + + // verify node_1 is in + let event_2_v5 = node_2.events.recv().await.unwrap(); + assert!(matches!( + event_2_v5, + discv5::Event::NodeInserted { node_id, replaced } if node_id == node_1_enr.node_id() && replaced.is_none() + )); + } +} diff --git a/examples/exex/in-memory-state/Cargo.toml b/examples/exex/in-memory-state/Cargo.toml index a7ae5baee5d5..b79808cb1392 100644 --- a/examples/exex/in-memory-state/Cargo.toml +++ b/examples/exex/in-memory-state/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "exex-in-memory-state" +name = "example-exex-in-memory-state" version = "0.0.0" publish = false edition.workspace = true @@ -11,6 +11,7 @@ reth-exex.workspace = true reth-node-api.workspace = true reth-node-ethereum.workspace = true reth-tracing.workspace = true +reth-execution-types.workspace = true eyre.workspace = true diff --git a/examples/exex/in-memory-state/src/main.rs b/examples/exex/in-memory-state/src/main.rs index cd683147ff05..c56cdcf5044b 100644 --- a/examples/exex/in-memory-state/src/main.rs +++ b/examples/exex/in-memory-state/src/main.rs @@ -1,6 +1,6 @@ #![warn(unused_crate_dependencies)] -use reth::providers::ExecutionOutcome; +use reth_execution_types::ExecutionOutcome; use reth_exex::{ExExContext, ExExEvent, 
ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -73,14 +73,11 @@ fn main() -> eyre::Result<()> { #[cfg(test)] mod tests { - use std::pin::pin; - - use reth::{ - providers::{Chain, ExecutionOutcome}, - revm::db::BundleState, - }; + use reth::revm::db::BundleState; + use reth_execution_types::{Chain, ExecutionOutcome}; use reth_exex_test_utils::{test_exex_context, PollOnce}; use reth_testing_utils::generators::{self, random_block, random_receipt}; + use std::pin::pin; #[tokio::test] async fn test_exex() -> eyre::Result<()> { diff --git a/examples/exex/minimal/Cargo.toml b/examples/exex/minimal/Cargo.toml index b4a3b4af0ae2..6cf958904129 100644 --- a/examples/exex/minimal/Cargo.toml +++ b/examples/exex/minimal/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "exex-minimal" +name = "example-exex-minimal" version = "0.0.0" publish = false edition.workspace = true @@ -11,6 +11,7 @@ reth-exex.workspace = true reth-node-api.workspace = true reth-node-ethereum.workspace = true reth-tracing.workspace = true +reth-execution-types.workspace = true eyre.workspace = true futures.workspace = true diff --git a/examples/exex/minimal/src/main.rs b/examples/exex/minimal/src/main.rs index 7f6d4585559e..cb7af242e65d 100644 --- a/examples/exex/minimal/src/main.rs +++ b/examples/exex/minimal/src/main.rs @@ -54,7 +54,7 @@ fn main() -> eyre::Result<()> { #[cfg(test)] mod tests { - use reth::providers::{Chain, ExecutionOutcome}; + use reth_execution_types::{Chain, ExecutionOutcome}; use reth_exex_test_utils::{test_exex_context, PollOnce}; use std::pin::pin; diff --git a/examples/exex/op-bridge/Cargo.toml b/examples/exex/op-bridge/Cargo.toml index ce1e39db16f0..38693a2c57d8 100644 --- a/examples/exex/op-bridge/Cargo.toml +++ b/examples/exex/op-bridge/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "exex-op-bridge" +name = "example-exex-op-bridge" version = "0.0.0" publish = false edition.workspace = true @@ -11,7 +11,7 @@ reth-exex.workspace = true 
reth-node-api.workspace = true reth-node-ethereum.workspace = true reth-primitives.workspace = true -reth-provider.workspace = true +reth-execution-types.workspace = true reth-tracing.workspace = true eyre.workspace = true diff --git a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs index aebe90e193c5..3c69572f24ae 100644 --- a/examples/exex/op-bridge/src/main.rs +++ b/examples/exex/op-bridge/src/main.rs @@ -1,10 +1,10 @@ use alloy_sol_types::{sol, SolEventInterface}; use futures::Future; +use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; use reth_primitives::{address, Address, Log, SealedBlockWithSenders, TransactionSigned}; -use reth_provider::Chain; use reth_tracing::tracing::info; use rusqlite::Connection; @@ -259,12 +259,12 @@ mod tests { use alloy_sol_types::SolEvent; use reth::revm::db::BundleState; + use reth_execution_types::{Chain, ExecutionOutcome}; use reth_exex_test_utils::{test_exex_context, PollOnce}; use reth_primitives::{ Address, Block, Header, Log, Receipt, Transaction, TransactionSigned, TxKind, TxLegacy, TxType, U256, }; - use reth_provider::{Chain, ExecutionOutcome}; use reth_testing_utils::generators::sign_tx_with_random_key_pair; use rusqlite::Connection; diff --git a/examples/exex/rollup/Cargo.toml b/examples/exex/rollup/Cargo.toml index 5a4dcb5f4fea..665d23f225be 100644 --- a/examples/exex/rollup/Cargo.toml +++ b/examples/exex/rollup/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "exex-rollup" +name = "example-exex-rollup" version = "0.0.0" publish = false edition.workspace = true @@ -8,11 +8,13 @@ license.workspace = true [dependencies] # reth reth.workspace = true +reth-chainspec.workspace = true reth-exex.workspace = true reth-node-api.workspace = true reth-node-ethereum.workspace = true reth-primitives.workspace = true reth-execution-errors.workspace = true +reth-execution-types.workspace = true 
reth-provider.workspace = true reth-revm.workspace = true reth-tracing.workspace = true @@ -22,10 +24,11 @@ tokio.workspace = true # misc alloy-consensus = { workspace = true, features = ["kzg"] } +alloy-genesis.workspace = true alloy-rlp.workspace = true alloy-sol-types = { workspace = true, features = ["json"] } eyre.workspace = true -foundry-blob-explorers = { git = "https://github.com/foundry-rs/block-explorers" } +foundry-blob-explorers = "0.1" once_cell.workspace = true rusqlite = { version = "0.31.0", features = ["bundled"] } serde_json.workspace = true diff --git a/examples/exex/rollup/src/db.rs b/examples/exex/rollup/src/db.rs index 2c42beafb93c..dcc8b435ebc5 100644 --- a/examples/exex/rollup/src/db.rs +++ b/examples/exex/rollup/src/db.rs @@ -443,7 +443,7 @@ impl reth_revm::Database for Database { get_storage(&self.connection(), address, index.into()).map(|data| data.unwrap_or_default()) } - fn block_hash(&mut self, number: U256) -> Result { + fn block_hash(&mut self, number: u64) -> Result { let block_hash = self.connection().query_row::( "SELECT hash FROM block WHERE number = ?", (number.to_string(),), diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs index deffc3a80e11..a86b16508a4b 100644 --- a/examples/exex/rollup/src/execution.rs +++ b/examples/exex/rollup/src/execution.rs @@ -10,10 +10,9 @@ use reth_primitives::{ constants, eip4844::kzg_to_versioned_hash, keccak256, - revm::env::fill_tx_env, revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState}, - Address, Block, BlockWithSenders, Bytes, Hardfork, Header, Receipt, TransactionSigned, TxType, - B256, U256, + Address, Block, BlockWithSenders, Bytes, EthereumHardfork, Header, Receipt, TransactionSigned, + TxType, B256, U256, }; use reth_revm::{ db::{states::bundle_state::BundleRetention, BundleState}, @@ -69,16 +68,17 @@ fn construct_header(db: &Database, header: &RollupContract::BlockHeader) -> eyre let block_number = 
u64::try_from(header.sequence)?; // Calculate base fee per gas for EIP-1559 transactions - let base_fee_per_gas = if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) { - constants::EIP1559_INITIAL_BASE_FEE - } else { - parent_block - .as_ref() - .ok_or(eyre::eyre!("parent block not found"))? - .header - .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number)) - .ok_or(eyre::eyre!("failed to calculate base fee"))? - }; + let base_fee_per_gas = + if CHAIN_SPEC.fork(EthereumHardfork::London).transitions_at_block(block_number) { + constants::EIP1559_INITIAL_BASE_FEE + } else { + parent_block + .as_ref() + .ok_or(eyre::eyre!("parent block not found"))? + .header + .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number)) + .ok_or(eyre::eyre!("failed to calculate base fee"))? + }; // Construct header Ok(Header { @@ -103,17 +103,11 @@ fn configure_evm<'a>( .build(), ); evm.db_mut().set_state_clear_flag( - CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number), + CHAIN_SPEC.fork(EthereumHardfork::SpuriousDragon).active_at_block(header.number), ); let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id()); - EthEvmConfig::fill_cfg_and_block_env( - &mut cfg, - evm.block_mut(), - &CHAIN_SPEC, - header, - U256::ZERO, - ); + config.fill_cfg_and_block_env(&mut cfg, evm.block_mut(), &CHAIN_SPEC, header, U256::ZERO); *evm.cfg_mut() = cfg.cfg_env; evm @@ -216,7 +210,7 @@ fn execute_transactions( } // Execute transaction. // Fill revm structure. 
- fill_tx_env(evm.tx_mut(), &transaction, sender); + EthEvmConfig::default().fill_tx_env(evm.tx_mut(), &transaction, sender); let ResultAndState { result, state } = match evm.transact() { Ok(result) => result, @@ -277,7 +271,7 @@ mod tests { bytes, constants::ETH_TO_WEI, keccak256, public_key_to_address, - revm_primitives::{AccountInfo, ExecutionResult, Output, TransactTo, TxEnv}, + revm_primitives::{AccountInfo, ExecutionResult, Output, TxEnv}, BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, }; use reth_revm::Evm; @@ -383,7 +377,7 @@ mod tests { .with_tx_env(TxEnv { caller: sender_address, gas_limit: 50_000_000, - transact_to: TransactTo::Call(weth_address), + transact_to: TxKind::Call(weth_address), data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), ..Default::default() }) @@ -408,7 +402,7 @@ mod tests { .with_tx_env(TxEnv { caller: sender_address, gas_limit: 50_000_000, - transact_to: TransactTo::Call(weth_address), + transact_to: TxKind::Call(weth_address), data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), ..Default::default() }) diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs index cf634ca005e7..1cefa4c56252 100644 --- a/examples/exex/rollup/src/main.rs +++ b/examples/exex/rollup/src/main.rs @@ -4,18 +4,17 @@ //! The rollup contract accepts blocks of transactions and deposits of ETH and is deployed on //! Holesky at [ROLLUP_CONTRACT_ADDRESS], see . 
+use alloy_genesis::Genesis; use alloy_sol_types::{sol, SolEventInterface, SolInterface}; use db::Database; use execution::execute_block; use once_cell::sync::Lazy; +use reth_chainspec::{ChainSpec, ChainSpecBuilder}; +use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; -use reth_primitives::{ - address, Address, ChainSpec, ChainSpecBuilder, Genesis, SealedBlockWithSenders, - TransactionSigned, U256, -}; -use reth_provider::Chain; +use reth_primitives::{address, Address, SealedBlockWithSenders, TransactionSigned, U256}; use reth_tracing::tracing::{error, info}; use rusqlite::Connection; use std::sync::Arc; diff --git a/examples/manual-p2p/Cargo.toml b/examples/manual-p2p/Cargo.toml index 5a9a999b9953..ca792a364bf4 100644 --- a/examples/manual-p2p/Cargo.toml +++ b/examples/manual-p2p/Cargo.toml @@ -1,11 +1,12 @@ [package] -name = "manual-p2p" +name = "example-manual-p2p" version = "0.0.0" publish = false edition.workspace = true license.workspace = true [dependencies] +reth-chainspec.workspace = true reth-primitives.workspace = true reth-network.workspace = true reth-discv4.workspace = true diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index a9115922db38..a0d3af61b382 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -10,16 +10,15 @@ use std::time::Duration; use futures::StreamExt; use once_cell::sync::Lazy; +use reth_chainspec::{Chain, MAINNET}; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ EthMessage, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream, UnauthedP2PStream, }; use reth_network::config::rng_secret_key; -use reth_network_peers::pk2id; -use reth_primitives::{ - mainnet_nodes, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, -}; +use 
reth_network_peers::{mainnet_nodes, pk2id, NodeRecord}; +use reth_primitives::{EthereumHardfork, Head, MAINNET_GENESIS_HASH}; use secp256k1::{SecretKey, SECP256K1}; use tokio::net::TcpStream; @@ -96,14 +95,14 @@ async fn handshake_p2p( // Perform a ETH Wire handshake with a peer async fn handshake_eth(p2p_stream: AuthedP2PStream) -> eyre::Result<(AuthedEthStream, Status)> { let fork_filter = MAINNET.fork_filter(Head { - timestamp: MAINNET.fork(Hardfork::Shanghai).as_timestamp().unwrap(), + timestamp: MAINNET.fork(EthereumHardfork::Shanghai).as_timestamp().unwrap(), ..Default::default() }); let status = Status::builder() .chain(Chain::mainnet()) .genesis(MAINNET_GENESIS_HASH) - .forkid(MAINNET.hardfork_fork_id(Hardfork::Shanghai).unwrap()) + .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Shanghai).unwrap()) .build(); let status = Status { version: p2p_stream.shared_capabilities().eth()?.version(), ..status }; diff --git a/examples/network-txpool/Cargo.toml b/examples/network-txpool/Cargo.toml index 12544a8f30df..7d4817263b75 100644 --- a/examples/network-txpool/Cargo.toml +++ b/examples/network-txpool/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "network-txpool" +name = "example-network-txpool" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/network/Cargo.toml b/examples/network/Cargo.toml index b3b740dd8ff1..fe7fc381b526 100644 --- a/examples/network/Cargo.toml +++ b/examples/network/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "network" +name = "example-network" version = "0.0.0" publish = false edition.workspace = true @@ -10,4 +10,4 @@ reth-network.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } futures.workspace = true tokio.workspace = true -eyre.workspace = true \ No newline at end of file +eyre.workspace = true diff --git a/examples/node-custom-rpc/Cargo.toml b/examples/node-custom-rpc/Cargo.toml index 473e9acaf9fa..e82254757ec1 100644 --- a/examples/node-custom-rpc/Cargo.toml +++ 
b/examples/node-custom-rpc/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-custom-rpc" +name = "example-node-custom-rpc" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/node-event-hooks/Cargo.toml b/examples/node-event-hooks/Cargo.toml index eb36722aadee..450f6f006b28 100644 --- a/examples/node-event-hooks/Cargo.toml +++ b/examples/node-event-hooks/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "node-event-hooks" +name = "example-node-event-hooks" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/polygon-p2p/Cargo.toml b/examples/polygon-p2p/Cargo.toml index b1f5c9870839..b3a7af7506b4 100644 --- a/examples/polygon-p2p/Cargo.toml +++ b/examples/polygon-p2p/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "polygon-p2p" +name = "example-polygon-p2p" version = "0.0.0" publish = false edition.workspace = true @@ -11,9 +11,10 @@ license.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } tokio.workspace = true reth-network.workspace = true +reth-chainspec.workspace = true reth-primitives.workspace = true serde_json.workspace = true reth-tracing.workspace = true tokio-stream.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } -reth-discv4 = { workspace = true, features = ["test-utils"] } \ No newline at end of file +reth-discv4 = { workspace = true, features = ["test-utils"] } diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs index 5860cdb1d39d..92256a1be1c3 100644 --- a/examples/polygon-p2p/src/chain_cfg.rs +++ b/examples/polygon-p2p/src/chain_cfg.rs @@ -1,8 +1,10 @@ -use reth_primitives::{ - b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, Hardfork, Head, NodeRecord, B256, +use reth_chainspec::{ + BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, }; +use reth_discv4::NodeRecord; +use reth_primitives::{b256, Head, B256}; -use std::{collections::BTreeMap, 
sync::Arc}; +use std::sync::Arc; const SHANGAI_BLOCK: u64 = 50523000; @@ -15,16 +17,16 @@ pub(crate) fn polygon_chain_spec() -> Arc { genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"), genesis_hash: Some(GENESIS), paris_block_and_final_difficulty: None, - hardforks: BTreeMap::from([ - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(3395000)), - (Hardfork::MuirGlacier, ForkCondition::Block(3395000)), - (Hardfork::Berlin, ForkCondition::Block(14750000)), - (Hardfork::London, ForkCondition::Block(23850000)), - (Hardfork::Shanghai, ForkCondition::Block(SHANGAI_BLOCK)), + hardforks: ChainHardforks::new(vec![ + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(3395000)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(3395000)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(14750000)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(23850000)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(SHANGAI_BLOCK)), ]), deposit_contract: None, - base_fee_params: reth_primitives::BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), + base_fee_params: reth_chainspec::BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), prune_delete_limit: 0, } .into() diff --git a/examples/rpc-db/Cargo.toml b/examples/rpc-db/Cargo.toml index 51f53cd39c29..007a488b8174 100644 --- a/examples/rpc-db/Cargo.toml +++ b/examples/rpc-db/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "rpc-db" +name = "example-rpc-db" version = "0.0.0" publish = false edition.workspace = true @@ -9,8 +9,10 @@ license.workspace = true futures.workspace = true jsonrpsee.workspace = true reth.workspace = true +reth-chainspec.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-node-ethereum.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } tokio = { workspace = 
true, features = ["full"] } eyre.workspace = true diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 732e4ad38135..30c0479549fc 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -12,29 +12,28 @@ //! cast rpc myrpcExt_customMethod //! ``` +use std::{path::Path, sync::Arc}; + use reth::{ - primitives::ChainSpecBuilder, providers::{ providers::{BlockchainProvider, StaticFileProvider}, ProviderFactory, }, - utils::db::open_db_read_only, + utils::open_db_read_only, }; +use reth_chainspec::ChainSpecBuilder; use reth_db::mdbx::DatabaseArguments; use reth_db_api::models::ClientVersion; // Bringing up the RPC use reth::rpc::builder::{ - RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig, + EthApiBuild, RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig, }; // Configuring the network parts, ideally also wouldn't need to think about this. use myrpc_ext::{MyRpcExt, MyRpcExtApiServer}; -use reth::{ - blockchain_tree::noop::NoopBlockchainTree, providers::test_utils::TestCanonStateSubscriptions, - tasks::TokioTaskExecutor, -}; +use reth::{blockchain_tree::noop::NoopBlockchainTree, tasks::TokioTaskExecutor}; use reth_node_ethereum::EthEvmConfig; -use std::{path::Path, sync::Arc}; +use reth_provider::test_utils::TestCanonStateSubscriptions; // Custom rpc extension pub mod myrpc_ext; @@ -71,7 +70,7 @@ async fn main() -> eyre::Result<()> { // Pick which namespaces to expose. 
let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); - let mut server = rpc_builder.build(config); + let mut server = rpc_builder.build(config, EthApiBuild::build); // Add a custom rpc namespace let custom_rpc = MyRpcExt { provider }; @@ -80,7 +79,7 @@ async fn main() -> eyre::Result<()> { // Start the server & keep it alive let server_args = RpcServerConfig::http(Default::default()).with_http_address("0.0.0.0:8545".parse()?); - let _handle = server_args.start(server).await?; + let _handle = server_args.start(&server).await?; futures::future::pending::<()>().await; Ok(()) diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs index d1898b81cb15..e38b6fc24d37 100644 --- a/examples/rpc-db/src/myrpc_ext.rs +++ b/examples/rpc-db/src/myrpc_ext.rs @@ -3,7 +3,7 @@ use reth::{primitives::Block, providers::BlockReaderIdExt}; // Rpc related imports use jsonrpsee::proc_macros::rpc; -use reth::rpc::eth::error::EthResult; +use reth::rpc::server_types::eth::EthResult; /// trait interface for a custom rpc namespace: `MyRpc` /// diff --git a/examples/stateful-precompile/Cargo.toml b/examples/stateful-precompile/Cargo.toml new file mode 100644 index 000000000000..2ae4656eee86 --- /dev/null +++ b/examples/stateful-precompile/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "example-stateful-precompile" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-chainspec.workspace = true +reth-node-api.workspace = true +reth-node-core.workspace = true +reth-primitives.workspace = true +reth-node-ethereum = { workspace = true, features = ["test-utils"] } +reth-tracing.workspace = true +alloy-genesis.workspace = true + +eyre.workspace = true +parking_lot.workspace = true +schnellru.workspace = true +tokio.workspace = true diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs new file mode 100644 index 
000000000000..b595647e0952 --- /dev/null +++ b/examples/stateful-precompile/src/main.rs @@ -0,0 +1,254 @@ +//! This example shows how to implement a node with a custom EVM that uses a stateful precompile + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use alloy_genesis::Genesis; +use parking_lot::RwLock; +use reth::{ + builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, + primitives::{ + revm_primitives::{CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, + Address, Bytes, U256, + }, + revm::{ + handler::register::EvmHandler, + inspector_handle_register, + precompile::{Precompile, PrecompileSpecId}, + ContextPrecompile, ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, + }, + tasks::TaskManager, +}; +use reth_chainspec::{Chain, ChainSpec}; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; +use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; +use reth_primitives::{ + revm_primitives::{SpecId, StatefulPrecompileMut}, + Header, TransactionSigned, +}; +use reth_tracing::{RethTracer, Tracer}; +use schnellru::{ByLength, LruMap}; +use std::{collections::HashMap, sync::Arc}; + +/// Type alias for the LRU cache used within the [`PrecompileCache`]. +type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>; + +/// Type alias for the thread-safe `Arc>` wrapper around [`PrecompileCache`]. +type CachedPrecompileResult = Arc>; + +/// A cache for precompile inputs / outputs. +/// +/// This assumes that the precompile is a standard precompile, as in `StandardPrecompileFn`, meaning +/// its inputs are only `(Bytes, u64)`. +/// +/// NOTE: This does not work with "context stateful precompiles", ie `ContextStatefulPrecompile` or +/// `ContextStatefulPrecompileMut`. They are explicitly banned. +#[derive(Debug, Default)] +pub struct PrecompileCache { + /// Caches for each precompile input / output. 
+ cache: HashMap<(Address, SpecId), CachedPrecompileResult>, +} + +/// Custom EVM configuration +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct MyEvmConfig { + precompile_cache: Arc>, +} + +impl MyEvmConfig { + /// Sets the precompiles to the EVM handler + /// + /// This will be invoked when the EVM is created via [ConfigureEvm::evm] or + /// [ConfigureEvm::evm_with_inspector] + /// + /// This will use the default mainnet precompiles and wrap them with a cache. + pub fn set_precompiles( + handler: &mut EvmHandler, + cache: Arc>, + ) where + DB: Database, + { + // first we need the evm spec id, which determines the precompiles + let spec_id = handler.cfg.spec_id; + + let mut loaded_precompiles: ContextPrecompiles = + ContextPrecompiles::new(PrecompileSpecId::from_spec_id(spec_id)); + for (address, precompile) in loaded_precompiles.to_mut().iter_mut() { + // get or insert the cache for this address / spec + let mut cache = cache.write(); + let cache = cache + .cache + .entry((*address, spec_id)) + .or_insert(Arc::new(RwLock::new(LruMap::new(ByLength::new(1024))))); + + *precompile = Self::wrap_precompile(precompile.clone(), cache.clone()); + } + + // install the precompiles + handler.pre_execution.load_precompiles = Arc::new(move || loaded_precompiles.clone()); + } + + /// Given a [`ContextPrecompile`] and cache for a specific precompile, create a new precompile + /// that wraps the precompile with the cache. 
+ fn wrap_precompile( + precompile: ContextPrecompile, + cache: Arc>>, + ) -> ContextPrecompile + where + DB: Database, + { + let ContextPrecompile::Ordinary(precompile) = precompile else { + // context stateful precompiles are not supported, due to lifetime issues or skill + // issues + panic!("precompile is not ordinary"); + }; + + let wrapped = WrappedPrecompile { precompile, cache: cache.clone() }; + + ContextPrecompile::Ordinary(Precompile::StatefulMut(Box::new(wrapped))) + } +} + +/// A custom precompile that contains the cache and precompile it wraps. +#[derive(Clone)] +pub struct WrappedPrecompile { + /// The precompile to wrap. + precompile: Precompile, + /// The cache to use. + cache: Arc>>, +} + +impl StatefulPrecompileMut for WrappedPrecompile { + fn call_mut(&mut self, bytes: &Bytes, gas_price: u64, _env: &Env) -> PrecompileResult { + let mut cache = self.cache.write(); + let key = (bytes.clone(), gas_price); + + // get the result if it exists + if let Some(result) = cache.get(&key) { + return result.clone() + } + + // call the precompile if cache miss + let output = self.precompile.call(bytes, gas_price, _env); + cache.insert(key, output.clone()); + + output + } +} + +impl ConfigureEvmEnv for MyEvmConfig { + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + EthEvmConfig::default().fill_tx_env(tx_env, transaction, sender) + } + + fn fill_cfg_env( + &self, + cfg_env: &mut CfgEnvWithHandlerCfg, + chain_spec: &ChainSpec, + header: &Header, + total_difficulty: U256, + ) { + EthEvmConfig::default().fill_cfg_env(cfg_env, chain_spec, header, total_difficulty) + } + + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ) { + EthEvmConfig::default().fill_tx_env_system_contract_call(env, caller, contract, data) + } +} + +impl ConfigureEvm for MyEvmConfig { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> 
Evm<'a, Self::DefaultExternalContext<'a>, DB> { + let new_cache = self.precompile_cache.clone(); + EvmBuilder::default() + .with_db(db) + // add additional precompiles + .append_handler_register_box(Box::new(move |handler| { + MyEvmConfig::set_precompiles(handler, new_cache.clone()) + })) + .build() + } + + fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + where + DB: Database + 'a, + I: GetInspector, + { + let new_cache = self.precompile_cache.clone(); + EvmBuilder::default() + .with_db(db) + .with_external_context(inspector) + // add additional precompiles + .append_handler_register_box(Box::new(move |handler| { + MyEvmConfig::set_precompiles(handler, new_cache.clone()) + })) + .append_handler_register(inspector_handle_register) + .build() + } +} + +/// Builds a regular ethereum block executor that uses the custom EVM. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct MyExecutorBuilder { + /// The precompile cache to use for all executors. 
+ precompile_cache: Arc>, +} + +impl ExecutorBuilder for MyExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = MyEvmConfig; + type Executor = EthExecutorProvider; + + async fn build_evm( + self, + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let evm_config = MyEvmConfig { precompile_cache: self.precompile_cache.clone() }; + Ok((evm_config.clone(), EthExecutorProvider::new(ctx.chain_spec(), evm_config))) + } +} + +#[tokio::main] +async fn main() -> eyre::Result<()> { + let _guard = RethTracer::new().init()?; + + let tasks = TaskManager::current(); + + // create a custom chain spec + let spec = ChainSpec::builder() + .chain(Chain::mainnet()) + .genesis(Genesis::default()) + .london_activated() + .paris_activated() + .shanghai_activated() + .cancun_activated() + .build(); + + let node_config = + NodeConfig::test().with_rpc(RpcServerArgs::default().with_http()).with_chain(spec); + + let handle = NodeBuilder::new(node_config) + .testing_node(tasks.executor()) + // configure the node with regular ethereum types + .with_types::() + // use default ethereum components but with our executor + .with_components(EthereumNode::components().executor(MyExecutorBuilder::default())) + .launch() + .await + .unwrap(); + + println!("Node started"); + + handle.node_exit_future.await +} diff --git a/examples/txpool-tracing/Cargo.toml b/examples/txpool-tracing/Cargo.toml index 220e5d8d523e..0a3dd2b9b996 100644 --- a/examples/txpool-tracing/Cargo.toml +++ b/examples/txpool-tracing/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "txpool-tracing" +name = "example-txpool-tracing" version = "0.0.0" publish = false edition.workspace = true @@ -9,4 +9,4 @@ license.workspace = true reth.workspace = true reth-node-ethereum.workspace = true clap = { workspace = true, features = ["derive"] } -futures-util.workspace = true \ No newline at end of file +futures-util.workspace = true diff --git a/examples/txpool-tracing/src/main.rs 
b/examples/txpool-tracing/src/main.rs index 85a5b795aad7..c9a14dee18a7 100644 --- a/examples/txpool-tracing/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -28,7 +28,7 @@ fn main() { Cli::::parse() .run(|builder, args| async move { // launch the node - let NodeHandle { mut node, node_exit_future } = + let NodeHandle { node, node_exit_future } = builder.node(EthereumNode::default()).launch().await?; // create a new subscription to pending transactions diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index ecacc0a67f7c..74206884a126 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -16,8 +16,9 @@ ef-tests = [] asm-keccak = ["reth-primitives/asm-keccak"] [dependencies] +reth-chainspec.workspace = true reth-primitives.workspace = true -reth-db = { workspace = true, features = ["mdbx", "test-utils"] } +reth-db = { workspace = true, features = ["mdbx", "test-utils", "disable-lock"] } reth-db-api.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-stages.workspace = true diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index fdfb2a5db6f1..fac7c31c8678 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -102,7 +102,6 @@ impl Case for BlockchainTestCase { ) .try_seal_with_senders() .unwrap(), - None, )?; case.pre.write_to_db(provider.tx_ref())?; @@ -121,7 +120,6 @@ impl Case for BlockchainTestCase { let decoded = SealedBlock::decode(&mut block.rlp.as_ref())?; provider.insert_historical_block( decoded.clone().try_seal_with_senders().unwrap(), - None, )?; Ok::, Error>(Some(decoded)) })?; @@ -189,7 +187,7 @@ pub fn should_skip(path: &Path) -> bool { | "ValueOverflow.json" | "ValueOverflowParis.json" - // txbyte is of type 02 and we dont parse tx bytes for this test to fail. + // txbyte is of type 02 and we don't parse tx bytes for this test to fail. 
| "typeTwoBerlin.json" // Test checks if nonce overflows. We are handling this correctly but we are not parsing diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index fbb11047e56b..806591c47749 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -1,15 +1,15 @@ //! Shared models for use crate::{assert::assert_equal, Error}; +use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_db::tables; use reth_db_api::{ cursor::DbDupCursorRO, transaction::{DbTx, DbTxMut}, }; use reth_primitives::{ - keccak256, Account as RethAccount, Address, Bloom, Bytecode, Bytes, ChainSpec, - ChainSpecBuilder, Header as RethHeader, SealedHeader, StorageEntry, Withdrawals, B256, B64, - U256, + keccak256, Account as RethAccount, Address, Bloom, Bytecode, Bytes, Header as RethHeader, + SealedHeader, StorageEntry, Withdrawals, B256, B64, U256, }; use serde::Deserialize; use std::{collections::BTreeMap, ops::Deref}; diff --git a/wvm-apps/wvm-exexed/Cargo.toml b/wvm-apps/wvm-exexed/Cargo.toml index ea2392fa80b2..9f6debdc412b 100644 --- a/wvm-apps/wvm-exexed/Cargo.toml +++ b/wvm-apps/wvm-exexed/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "wvm-exexed" -version = "0.1.0" -rust-version = "1.76" -edition = "2021" +version = "1.0.0" +rust-version.workspace = true +edition.workspace = true [lints] workspace = true @@ -18,13 +18,12 @@ futures.workspace = true alloy-primitives.workspace = true tokio.workspace = true -## bigquery dependencies +# bigquery dependencies gcp-bigquery-client = "0.17.0" indexmap = "2.0.0" serde_json = "1.0" serde = { version = "1.0", features = ["derive"] } -### repository = { path = "crates/repository" } bigquery = { path = "crates/bigquery" } lambda = { path = "crates/lambda" } @@ -35,9 +34,6 @@ types = { path = "crates/types" } reth-exex-test-utils.workspace = true reth-testing-utils.workspace = true -#[workspace] -#members = ["crates/*"] - [[bin]] name = "reth" path = "crates/reth-exexed/src/main.rs" diff --git 
a/wvm-apps/wvm-exexed/crates/bigquery/src/client.rs b/wvm-apps/wvm-exexed/crates/bigquery/src/client.rs index 7ecbeb3adbf0..42d5504b783c 100644 --- a/wvm-apps/wvm-exexed/crates/bigquery/src/client.rs +++ b/wvm-apps/wvm-exexed/crates/bigquery/src/client.rs @@ -1,5 +1,5 @@ use indexmap::IndexMap; -use std::{any::Any, collections::HashMap}; +use std::collections::HashMap; use gcp_bigquery_client::{ error::BQError, diff --git a/wvm-apps/wvm-exexed/crates/lambda/src/lambda.rs b/wvm-apps/wvm-exexed/crates/lambda/src/lambda.rs index 3bce83469c14..9a0067b3cc71 100644 --- a/wvm-apps/wvm-exexed/crates/lambda/src/lambda.rs +++ b/wvm-apps/wvm-exexed/crates/lambda/src/lambda.rs @@ -1,13 +1,9 @@ -use reqwest::Client; use reth::{ api::FullNodeComponents, - primitives::{address, Address, TransactionSigned}, + primitives::{Address, TransactionSigned}, }; use reth_exex::ExExContext; -use reth_node_ethereum::EthereumNode; -use reth_tracing::tracing::info; use serde_json::{self, json}; -use std::collections::HashMap; pub const SEQ_ADDRESS: &str = "0x197f818c1313DC58b32D88078ecdfB40EA822614"; pub const LAMBDA_ENDPOINT: &str = "https://wvm-lambda-0755acbdae90.herokuapp.com"; diff --git a/wvm-apps/wvm-exexed/crates/reth-exexed/Cargo.toml b/wvm-apps/wvm-exexed/crates/reth-exexed/Cargo.toml index 97a7a0b462b3..04d3782d768f 100644 --- a/wvm-apps/wvm-exexed/crates/reth-exexed/Cargo.toml +++ b/wvm-apps/wvm-exexed/crates/reth-exexed/Cargo.toml @@ -9,14 +9,12 @@ rust-version.workspace = true [dependencies] reth.workspace = true reth-exex.workspace = true -reth-node-api.workspace = true reth-node-ethereum.workspace = true reth-tracing.workspace = true eyre.workspace = true -futures.workspace = true -tokio.workspace = true + + serde_json.workspace = true -serde.workspace = true repository = { path = "../repository" } bigquery = { path = "../bigquery" } diff --git a/wvm-apps/wvm-exexed/crates/reth-exexed/src/main.rs b/wvm-apps/wvm-exexed/crates/reth-exexed/src/main.rs index 
5590550883a1..01436af99e43 100644 --- a/wvm-apps/wvm-exexed/crates/reth-exexed/src/main.rs +++ b/wvm-apps/wvm-exexed/crates/reth-exexed/src/main.rs @@ -1,3 +1,7 @@ +//! WVM node main + +#![doc(issue_tracker_base_url = "https://github.com/weaveVM/wvm-reth/issues/")] + use bigquery::client::BigQueryConfig; use lambda::lambda::exex_lambda_processor; use repository::state_repository; @@ -6,14 +10,12 @@ use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; use serde_json; -use std::path::Path; -use tokio; use types::types::ExecutionTipState; async fn exex_etl_processor( mut ctx: ExExContext, - state_repository: repository::state_repository::StateRepository, - state_processor: exex_etl::state_processor::StateProcessor, + state_repository: state_repository::StateRepository, + _state_processor: exex_etl::state_processor::StateProcessor, ) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.recv().await { match ¬ification { @@ -45,6 +47,7 @@ async fn exex_etl_processor( Ok(()) } +/// Main loop of the WVM node fn main() -> eyre::Result<()> { reth::cli::Cli::parse_args().run(|builder, _| async move { let handle = builder diff --git a/wvm-apps/wvm-exexed/crates/types/src/types.rs b/wvm-apps/wvm-exexed/crates/types/src/types.rs index d4b26f24636e..3431127e2164 100644 --- a/wvm-apps/wvm-exexed/crates/types/src/types.rs +++ b/wvm-apps/wvm-exexed/crates/types/src/types.rs @@ -1,5 +1,5 @@ use alloy_primitives; -use reth::{primitives::SealedBlockWithSenders, providers::ExecutionOutcome}; +use reth::primitives::SealedBlockWithSenders; use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize)]