) -> eyre::Result<()> {
+    while let Some(notification) = ctx.notifications.recv().await {
+        match &notification {
+            ExExNotification::ChainCommitted { new } => {
+                info!(committed_chain = ?new.range(), "Received commit");
+            }
+            ExExNotification::ChainReorged { old, new } => {
+                info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
+            }
+            ExExNotification::ChainReverted { old } => {
+                info!(reverted_chain = ?old.range(), "Received revert");
+            }
+        };
+
+        if let Some(committed_chain) = notification.committed_chain() {
+            ctx.events
+                .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+        }
+    }
+
+    Ok(())
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("my-exex", |ctx| async move { Ok(my_exex(ctx)) })
+            .launch()
+            .await?;
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+Woah, there's a lot of new stuff here! Let's go through it step by step:
+
+- First, we've added a `while let Some(notification) = ctx.notifications.recv().await` loop that waits for new notifications to come in.
+ - The main node is responsible for sending notifications to the ExEx, so we're waiting for them to come in.
+- Next, we've added a `match &notification { ... }` block that matches on the type of the notification.
+ - In each case, we're logging the notification and the corresponding block range, be it a chain commit, revert, or reorg.
+- Finally, we're checking if the notification contains a committed chain, and if it does, we're sending an `ExExEvent::FinishedHeight` event back to the main node using the `ctx.events.send` method.
+
+<div class="warning">
+
+Sending an `ExExEvent::FinishedHeight` event is a very important part of every ExEx.
+
+It's the only way to communicate to the main node that the ExEx has finished processing the specified height
+and it's safe to prune the associated data.
+
+</div>
+
+What we've arrived at is the [minimal ExEx example](https://github.com/paradigmxyz/reth/blob/b8cd7be6c92a71aea5341cdeba685f124c6de540/examples/exex/minimal/src/main.rs) that we provide in the Reth repository.
+
+## What's next?
+
+Let's do something a bit more interesting, and see how you can [keep track of some state](./tracking-state.md) inside your ExEx.
diff --git a/book/developers/exex/how-it-works.md b/book/developers/exex/how-it-works.md
new file mode 100644
index 000000000000..7fd179bf9155
--- /dev/null
+++ b/book/developers/exex/how-it-works.md
@@ -0,0 +1,26 @@
+# How do ExExes work?
+
+ExExes are just [Futures](https://doc.rust-lang.org/std/future/trait.Future.html) that run indefinitely alongside Reth
+– as simple as that.
+
+An ExEx is usually driven by and acts on new notifications about chain commits, reverts, and reorgs, but it can go beyond that.
+
+They are installed into the node by using the [node builder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html); see the sketch after the list below.
+Reth manages the lifecycle of all ExExes, including:
+- Polling ExEx futures
+- Sending [notifications](https://reth.rs/docs/reth_exex/enum.ExExNotification.html) about chain commits, reverts,
+  and reorgs from historical and live sync
+- Processing [events](https://reth.rs/docs/reth_exex/enum.ExExEvent.html) emitted by ExExes
+- Pruning (in case of a full or pruned node) only the data that has been processed by all ExExes
+- Shutting ExExes down when the node is shut down
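+
+For reference, installation is a single call on the builder. Below is a sketch, assuming the `my_exex` async function from the [Hello World](./hello-world.md) chapter:
+
+```rust,norun,noplayground,ignore
+// A sketch: the closure receives an `ExExContext` and returns a future that
+// resolves to the ExEx future itself, which Reth then polls for the node's lifetime.
+let handle = builder
+    .node(EthereumNode::default())
+    .install_exex("my-exex", |ctx| async move { Ok(my_exex(ctx)) })
+    .launch()
+    .await?;
+```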
+
+## Pruning
+
+Pruning deserves a special mention here.
+
+ExExes **SHOULD** emit an [`ExExEvent::FinishedHeight`](https://reth.rs/docs/reth_exex/enum.ExExEvent.html#variant.FinishedHeight)
+event to signify what blocks have been processed. This event is used by Reth to determine what state can be pruned.
+
+An ExEx will only receive notifications for block numbers greater than the block in the most recently emitted `FinishedHeight` event.
+
+To clarify: if an ExEx emits `ExExEvent::FinishedHeight(0)` it will receive notifications for any `block_number > 0`.
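+
+As a sketch (assuming the notification loop and `ctx` from the [Hello World](./hello-world.md) chapter), the pattern looks like this:
+
+```rust,norun,noplayground,ignore
+// Once this ExEx has fully processed the committed chain, tell Reth that
+// everything up to (and including) its tip is safe to prune.
+if let Some(committed_chain) = notification.committed_chain() {
+    ctx.events
+        .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+}
+```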
diff --git a/book/developers/exex/remote.md b/book/developers/exex/remote.md
new file mode 100644
index 000000000000..e0caa72f62d3
--- /dev/null
+++ b/book/developers/exex/remote.md
@@ -0,0 +1,490 @@
+# Remote Execution Extensions
+
+In this chapter, we will learn how to create an ExEx that emits all notifications to an external process.
+
+We will use [Tonic](https://github.com/hyperium/tonic) to create a gRPC server and a client.
+- The server binary will have the Reth client, our ExEx, and the gRPC server.
+- The client binary will have the gRPC client that connects to the server.
+
+## Prerequisites
+
+See the [dependencies section](https://github.com/hyperium/tonic?tab=readme-ov-file#dependencies) of the Tonic documentation
+to install the required dependencies.
+
+## Create a new project
+
+Let's create a new project. Don't forget to provide the `--lib` flag to `cargo new`,
+because we will have two custom binaries in this project that we will create manually.
+
+```console
+$ cargo new --lib exex-remote
+$ cd exex-remote
+```
+
+We will also need a bunch of dependencies. Some of them you know from the [Hello World](./hello-world.md) chapter,
+but some are specific to what we need now.
+
+```toml
+[package]
+name = "remote-exex"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+# reth
+reth = { git = "https://github.com/paradigmxyz/reth.git" }
+reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] }
+reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git"}
+reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" }
+
+# async
+tokio = { version = "1", features = ["full"] }
+tokio-stream = "0.1"
+futures-util = "0.3"
+
+# grpc
+tonic = "0.11"
+prost = "0.12"
+bincode = "1"
+
+# misc
+eyre = "0.6"
+
+[build-dependencies]
+tonic-build = "0.11"
+
+[[bin]]
+name = "exex"
+path = "src/exex.rs"
+
+[[bin]]
+name = "consumer"
+path = "src/consumer.rs"
+```
+
+We also added a build dependency for Tonic. We will use it to generate the Rust code for our
+Protobuf definitions at compile time. Read more about using Tonic in the
+[introductory tutorial](https://github.com/hyperium/tonic/blob/6a213e9485965db0628591e30577ed81cdaeaf2b/examples/helloworld-tutorial.md).
+
+Also, we now have two separate binaries:
+- `exex` is the server binary that will run the ExEx and the gRPC server.
+- `consumer` is the client binary that will connect to the server and receive notifications.
+
+### Create the Protobuf definitions
+
+In the root directory of your project (not `src`), create a new directory called `proto` and a file called `exex.proto`.
+
+We define a service called `RemoteExEx` that exposes a single method called `Subscribe`.
+This method streams notifications to the client.
+
+<div class="warning">
+
+A proper way to represent the notification would be to define all fields in the schema, but it goes beyond the scope
+of this chapter.
+
+For an example of a full schema, see the [Remote ExEx](https://github.com/paradigmxyz/reth-exex-grpc/blob/22b26f7beca1c74577d28be3b3838eb352747be0/proto/exex.proto) example.
+
+</div>
+
+```protobuf
+syntax = "proto3";
+
+package exex;
+
+service RemoteExEx {
+  rpc Subscribe(SubscribeRequest) returns (stream ExExNotification) {}
+}
+
+message SubscribeRequest {}
+
+message ExExNotification {
+  bytes data = 1;
+}
+```
+
+To instruct Tonic to generate the Rust code using this `.proto`, add the following lines to your `lib.rs` file:
+```rust,norun,noplayground,ignore
+pub mod proto {
+    tonic::include_proto!("exex");
+}
+```
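+
+For `tonic::include_proto!` to have anything to include, the crate also needs a build script that runs `tonic-build` on our schema at compile time. A minimal `build.rs` sketch, assuming the `proto/exex.proto` path from above:
+
+```rust,norun,noplayground,ignore
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Generate the Rust types and client/server stubs from the Protobuf schema.
+    tonic_build::compile_protos("proto/exex.proto")?;
+    Ok(())
+}
+```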
+
+## ExEx and gRPC server
+
+We will now create the ExEx and the gRPC server in our `src/exex.rs` file.
+
+### gRPC server
+
+Let's create a minimal gRPC server that listens on port `:10000`, and spawn it using
+the [NodeBuilder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html)'s [task executor](https://reth.rs/docs/reth/tasks/struct.TaskExecutor.html).
+
+```rust,norun,noplayground,ignore
+use remote_exex::proto::{
+    self,
+    remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
+};
+use reth_exex::ExExNotification;
+use reth_node_ethereum::EthereumNode;
+use reth_tracing::tracing::info;
+use std::sync::Arc;
+use tokio::sync::{broadcast, mpsc};
+use tokio_stream::wrappers::ReceiverStream;
+use tonic::{transport::Server, Request, Response, Status};
+
+struct ExExService {}
+
+#[tonic::async_trait]
+impl RemoteExEx for ExExService {
+    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
+
+    async fn subscribe(
+        &self,
+        _request: Request<proto::SubscribeRequest>,
+    ) -> Result<Response<Self::SubscribeStream>, Status> {
+        let (_tx, rx) = mpsc::channel(1);
+
+        Ok(Response::new(ReceiverStream::new(rx)))
+    }
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let server = Server::builder()
+            .add_service(RemoteExExServer::new(ExExService {}))
+            .serve("[::1]:10000".parse().unwrap());
+
+        let handle = builder.node(EthereumNode::default()).launch().await?;
+
+        handle
+            .node
+            .task_executor
+            .spawn_critical("gRPC server", async move {
+                server.await.expect("failed to start gRPC server")
+            });
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+Currently, it does not send anything on the stream.
+We need to create a communication channel between our future ExEx and this gRPC server,
+so that we can send new `ExExNotification`s over it.
+
+Let's create this channel in the `main` function, where both the gRPC server and the ExEx are initialized,
+and store the sender half of this channel (from which we can create new receivers) in our gRPC server.
+
+```rust,norun,noplayground,ignore
+// ...
+use reth_exex::ExExNotification;
+
+struct ExExService {
+    notifications: Arc<broadcast::Sender<ExExNotification>>,
+}
+
+// ...
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let notifications = Arc::new(broadcast::channel(1).0);
+
+        let server = Server::builder()
+            .add_service(RemoteExExServer::new(ExExService {
+                notifications: notifications.clone(),
+            }))
+            .serve("[::1]:10000".parse().unwrap());
+
+        let handle = builder
+            .node(EthereumNode::default())
+            .launch()
+            .await?;
+
+        handle
+            .node
+            .task_executor
+            .spawn_critical("gRPC server", async move {
+                server.await.expect("failed to start gRPC server")
+            });
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+And with that, we're ready to handle incoming notifications, serialize them with [bincode](https://docs.rs/bincode/),
+and send them back to the client.
+
+For each incoming request, we spawn a separate tokio task that will run in the background,
+and then return the stream receiver to the client.
+
+```rust,norun,noplayground,ignore
+// ...
+
+#[tonic::async_trait]
+impl RemoteExEx for ExExService {
+    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
+
+    async fn subscribe(
+        &self,
+        _request: Request<proto::SubscribeRequest>,
+    ) -> Result<Response<Self::SubscribeStream>, Status> {
+        let (tx, rx) = mpsc::channel(1);
+
+        let mut notifications = self.notifications.subscribe();
+        tokio::spawn(async move {
+            while let Ok(notification) = notifications.recv().await {
+                let proto_notification = proto::ExExNotification {
+                    data: bincode::serialize(&notification).expect("failed to serialize"),
+                };
+                tx.send(Ok(proto_notification))
+                    .await
+                    .expect("failed to send notification to client");
+
+                info!("Notification sent to the gRPC client");
+            }
+        });
+
+        Ok(Response::new(ReceiverStream::new(rx)))
+    }
+}
+
+// ...
+```
+
+That's it for the gRPC server part! It doesn't receive anything on the `notifications` channel yet,
+but we will fix that with our ExEx.
+
+### ExEx
+
+Now, let's define the ExEx part of our binary.
+
+Our ExEx accepts a `notifications` channel and redirects all incoming `ExExNotification`s to it.
+
+<div class="warning">
+
+Don't forget to emit `ExExEvent::FinishedHeight`
+
+</div>
+
+```rust,norun,noplayground,ignore
+// ...
+use reth::api::FullNodeComponents;
+use reth_exex::{ExExContext, ExExEvent};
+
+async fn remote_exex<Node: FullNodeComponents>(
+    mut ctx: ExExContext<Node>,
+    notifications: Arc<broadcast::Sender<ExExNotification>>,
+) -> eyre::Result<()> {
+    while let Some(notification) = ctx.notifications.recv().await {
+        if let Some(committed_chain) = notification.committed_chain() {
+            ctx.events
+                .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+        }
+
+        info!("Notification sent to the gRPC server");
+        let _ = notifications.send(notification);
+    }
+
+    Ok(())
+}
+
+// ...
+```
+
+All that's left is to connect the pieces: install our ExEx in the node and pass the sender half
+of the communication channel to it.
+
+```rust,norun,noplayground,ignore
+// ...
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let notifications = Arc::new(broadcast::channel(1).0);
+
+        let server = Server::builder()
+            .add_service(RemoteExExServer::new(ExExService {
+                notifications: notifications.clone(),
+            }))
+            .serve("[::1]:10000".parse().unwrap());
+
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("remote-exex", |ctx| async move {
+                Ok(remote_exex(ctx, notifications))
+            })
+            .launch()
+            .await?;
+
+        handle
+            .node
+            .task_executor
+            .spawn_critical("gRPC server", async move {
+                server.await.expect("failed to start gRPC server")
+            });
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+### Full `exex.rs` code
+
+<details>
+<summary>Click to expand</summary>
+
+```rust,norun,noplayground,ignore
+use remote_exex::proto::{
+    self,
+    remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
+};
+use reth::api::FullNodeComponents;
+use reth_exex::{ExExContext, ExExEvent, ExExNotification};
+use reth_node_ethereum::EthereumNode;
+use reth_tracing::tracing::info;
+use std::sync::Arc;
+use tokio::sync::{broadcast, mpsc};
+use tokio_stream::wrappers::ReceiverStream;
+use tonic::{transport::Server, Request, Response, Status};
+
+struct ExExService {
+    notifications: Arc<broadcast::Sender<ExExNotification>>,
+}
+
+#[tonic::async_trait]
+impl RemoteExEx for ExExService {
+    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
+
+    async fn subscribe(
+        &self,
+        _request: Request<proto::SubscribeRequest>,
+    ) -> Result<Response<Self::SubscribeStream>, Status> {
+        let (tx, rx) = mpsc::channel(1);
+
+        let mut notifications = self.notifications.subscribe();
+        tokio::spawn(async move {
+            while let Ok(notification) = notifications.recv().await {
+                let proto_notification = proto::ExExNotification {
+                    data: bincode::serialize(&notification).expect("failed to serialize"),
+                };
+                tx.send(Ok(proto_notification))
+                    .await
+                    .expect("failed to send notification to client");
+
+                info!(?notification, "Notification sent to the gRPC client");
+            }
+        });
+
+        Ok(Response::new(ReceiverStream::new(rx)))
+    }
+}
+
+async fn remote_exex<Node: FullNodeComponents>(
+    mut ctx: ExExContext<Node>,
+    notifications: Arc<broadcast::Sender<ExExNotification>>,
+) -> eyre::Result<()> {
+    while let Some(notification) = ctx.notifications.recv().await {
+        if let Some(committed_chain) = notification.committed_chain() {
+            ctx.events
+                .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+        }
+
+        info!(?notification, "Notification sent to the gRPC server");
+        let _ = notifications.send(notification);
+    }
+
+    Ok(())
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let notifications = Arc::new(broadcast::channel(1).0);
+
+        let server = Server::builder()
+            .add_service(RemoteExExServer::new(ExExService {
+                notifications: notifications.clone(),
+            }))
+            .serve("[::1]:10000".parse().unwrap());
+
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("remote-exex", |ctx| async move {
+                Ok(remote_exex(ctx, notifications))
+            })
+            .launch()
+            .await?;
+
+        handle
+            .node
+            .task_executor
+            .spawn_critical("gRPC server", async move {
+                server.await.expect("failed to start gRPC server")
+            });
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+</details>
+
+## Consumer
+
+The consumer will be a much simpler binary that just connects to our gRPC server and prints out all the notifications
+it receives.
+
+<div class="warning">
+
+We need to increase the maximum message encoding and decoding sizes to `usize::MAX`,
+because notifications can get very heavy.
+
+</div>
+
+```rust,norun,noplayground,ignore
+use remote_exex::proto::{remote_ex_ex_client::RemoteExExClient, SubscribeRequest};
+use reth_exex::ExExNotification;
+use reth_tracing::{tracing::info, RethTracer, Tracer};
+
+#[tokio::main]
+async fn main() -> eyre::Result<()> {
+    let _ = RethTracer::new().init()?;
+
+    let mut client = RemoteExExClient::connect("http://[::1]:10000")
+        .await?
+        .max_encoding_message_size(usize::MAX)
+        .max_decoding_message_size(usize::MAX);
+
+    let mut stream = client.subscribe(SubscribeRequest {}).await?.into_inner();
+    while let Some(notification) = stream.message().await? {
+        let notification: ExExNotification = bincode::deserialize(&notification.data)?;
+
+        match notification {
+            ExExNotification::ChainCommitted { new } => {
+                info!(committed_chain = ?new.range(), "Received commit");
+            }
+            ExExNotification::ChainReorged { old, new } => {
+                info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
+            }
+            ExExNotification::ChainReverted { old } => {
+                info!(reverted_chain = ?old.range(), "Received revert");
+            }
+        };
+    }
+
+    Ok(())
+}
+```
+
+## Running
+
+In one terminal window, we will run our ExEx and gRPC server. It will start syncing Reth on the Holesky chain
+and use Etherscan in place of a real Consensus Client.
+
+```console
+cargo run --bin exex --release -- node --chain holesky --debug.etherscan
+```
+
+And in the other, we will run our consumer:
+
+```console
+cargo run --bin consumer --release
+```
+
+
diff --git a/book/developers/exex/tracking-state.md b/book/developers/exex/tracking-state.md
new file mode 100644
index 000000000000..5fe8b1c9ef83
--- /dev/null
+++ b/book/developers/exex/tracking-state.md
@@ -0,0 +1,193 @@
+# Tracking State
+
+In this chapter, we'll learn how to keep track of some state inside our ExEx.
+
+Let's continue with our Hello World example from the [previous chapter](./hello-world.md).
+
+### Turning ExEx into a struct
+
+First, we need to turn our ExEx into a stateful struct.
+
+Before, we had just an async function, but now we'll need to implement
+the [`Future`](https://doc.rust-lang.org/std/future/trait.Future.html) trait manually.
+
+<div class="warning">
+
+Having a stateful async function is also possible, but it makes testing harder,
+because you can't access variables inside the function to assert the state of your ExEx.
+
+</div>
+
+```rust,norun,noplayground,ignore
+use std::{
+    future::Future,
+    pin::Pin,
+    task::{ready, Context, Poll},
+};
+
+use reth::api::FullNodeComponents;
+use reth_exex::{ExExContext, ExExEvent, ExExNotification};
+use reth_node_ethereum::EthereumNode;
+use reth_tracing::tracing::info;
+
+struct MyExEx<Node: FullNodeComponents> {
+    ctx: ExExContext<Node>,
+}
+
+impl<Node: FullNodeComponents> Future for MyExEx<Node> {
+    type Output = eyre::Result<()>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.get_mut();
+
+        while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) {
+            match &notification {
+                ExExNotification::ChainCommitted { new } => {
+                    info!(committed_chain = ?new.range(), "Received commit");
+                }
+                ExExNotification::ChainReorged { old, new } => {
+                    info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
+                }
+                ExExNotification::ChainReverted { old } => {
+                    info!(reverted_chain = ?old.range(), "Received revert");
+                }
+            };
+
+            if let Some(committed_chain) = notification.committed_chain() {
+                this.ctx
+                    .events
+                    .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+            }
+        }
+
+        Poll::Ready(Ok(()))
+    }
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("my-exex", |ctx| async move { Ok(MyExEx { ctx }) })
+            .launch()
+            .await?;
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+For those who are not familiar with how async Rust works on a lower level, that may seem scary,
+but let's unpack what's going on here:
+
+1. Our ExEx is now a `struct` that contains the context and implements the `Future` trait. It's now pollable (hence `await`-able).
+1. We can't use `self` directly inside our `poll` method, and instead need to acquire a mutable reference to the data inside of the `Pin`.
+ Read more about pinning in [the book](https://rust-lang.github.io/async-book/04_pinning/01_chapter.html).
+1. We also can't use `await` directly inside `poll`, and instead need to poll futures manually.
+   We wrap the call to `poll_recv(cx)` in the [`ready!`](https://doc.rust-lang.org/std/task/macro.ready.html) macro,
+   so that if the notifications channel has no value ready, we instantly return `Poll::Pending` from our future; see the sketch after this list.
+1. We initialize and return the `MyExEx` struct directly in the `install_exex` method, because it's a Future.
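+
+To make step 3 concrete, here is roughly what the `ready!` line desugars to (a sketch, not the exact macro expansion):
+
+```rust,norun,noplayground,ignore
+// Roughly equivalent to `ready!(this.ctx.notifications.poll_recv(cx))`:
+let maybe_notification = match this.ctx.notifications.poll_recv(cx) {
+    // A notification (or `None` if the channel is closed) is ready: carry on.
+    Poll::Ready(value) => value,
+    // Nothing ready yet: return `Pending` so the runtime polls us again later.
+    Poll::Pending => return Poll::Pending,
+};
+```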
+
+With all that done, we're now free to add more fields to our `MyExEx` struct, and track some state in them.
+
+### Adding state
+
+Our ExEx will count the number of transactions in each block and log it to the console.
+
+```rust,norun,noplayground,ignore
+use std::{
+    future::Future,
+    pin::Pin,
+    task::{ready, Context, Poll},
+};
+
+use reth::{api::FullNodeComponents, primitives::BlockNumber};
+use reth_exex::{ExExContext, ExExEvent};
+use reth_node_ethereum::EthereumNode;
+use reth_tracing::tracing::info;
+
+struct MyExEx<Node: FullNodeComponents> {
+    ctx: ExExContext<Node>,
+    /// First block that was committed since the start of the ExEx.
+    first_block: Option<BlockNumber>,
+    /// Total number of transactions committed.
+    transactions: u64,
+}
+
+impl<Node: FullNodeComponents> MyExEx<Node> {
+    fn new(ctx: ExExContext<Node>) -> Self {
+        Self {
+            ctx,
+            first_block: None,
+            transactions: 0,
+        }
+    }
+}
+
+impl<Node: FullNodeComponents> Future for MyExEx<Node> {
+    type Output = eyre::Result<()>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.get_mut();
+
+        while let Some(notification) = ready!(this.ctx.notifications.poll_recv(cx)) {
+            if let Some(reverted_chain) = notification.reverted_chain() {
+                this.transactions = this.transactions.saturating_sub(
+                    reverted_chain
+                        .blocks_iter()
+                        .map(|b| b.body.len() as u64)
+                        .sum(),
+                );
+            }
+
+            if let Some(committed_chain) = notification.committed_chain() {
+                this.first_block.get_or_insert(committed_chain.first().number);
+
+                this.transactions += committed_chain
+                    .blocks_iter()
+                    .map(|b| b.body.len() as u64)
+                    .sum::<u64>();
+
+                this.ctx
+                    .events
+                    .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+            }
+
+            if let Some(first_block) = this.first_block {
+                info!(%first_block, transactions = %this.transactions, "Total number of transactions");
+            }
+        }
+
+        Poll::Ready(Ok(()))
+    }
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("my-exex", |ctx| async move { Ok(MyExEx::new(ctx)) })
+            .launch()
+            .await?;
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+As you can see, we added two fields to our ExEx struct:
+- `first_block` to keep track of the first block that was committed since the start of the ExEx.
+- `transactions` to keep track of the total number of transactions committed, accounting for reorgs and reverts.
+
+We also changed our `match` block to two `if` clauses:
+- The first one checks if there's a reverted chain using `notification.reverted_chain()`. If there is:
+  - We subtract the number of transactions in the reverted chain from the total number of transactions.
+  - It's important to use `saturating_sub` here: if we just started our node and instantly received a reorg,
+    our `transactions` counter is still zero, and a plain subtraction would underflow. See the sketch after this list.
+- The second one checks if there's a committed chain using `notification.committed_chain()`. If there is:
+  - We update the `first_block` field to the first block of the committed chain.
+  - We add the number of transactions in the committed chain to the total number of transactions.
+  - We send a `FinishedHeight` event back to the main node.
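+
+A quick standalone illustration of why `saturating_sub` matters here (not part of the ExEx itself):
+
+```rust,norun,noplayground,ignore
+let transactions: u64 = 0;
+// A plain `transactions - 5` would panic in debug builds (and wrap around in release);
+// `saturating_sub` clamps the result at zero instead.
+assert_eq!(transactions.saturating_sub(5), 0);
+```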
+
+Finally, on every notification, we log the total number of transactions and
+the first block that was committed since the start of the ExEx.
diff --git a/book/developers/profiling.md b/book/developers/profiling.md
index 884032b2ac88..f1fdf520eb2e 100644
--- a/book/developers/profiling.md
+++ b/book/developers/profiling.md
@@ -41,12 +41,12 @@ cargo build --features jemalloc-prof
```
When performing a longer-running or performance-sensitive task with reth, such as a sync test or load benchmark, it's usually recommended to use the `maxperf` profile. However, the `maxperf`
-profile does not enable debug symbols, which are required for tools like `perf` and `jemalloc` to produce results that a human can interpret. Reth includes a performance profile with debug symbols called `debug-fast`. To compile reth with debug symbols, jemalloc, profiling, and a performance profile:
+profile does not enable debug symbols, which are required for tools like `perf` and `jemalloc` to produce results that a human can interpret. Reth includes a performance profile with debug symbols called `profiling`. To compile reth with debug symbols, jemalloc, profiling, and a performance profile:
```
-cargo build --features jemalloc-prof --profile debug-fast
+cargo build --features jemalloc-prof --profile profiling
# May improve performance even more
-RUSTFLAGS="-C target-cpu=native" cargo build --features jemalloc-prof --profile debug-fast
+RUSTFLAGS="-C target-cpu=native" cargo build --features jemalloc-prof --profile profiling
```
### Monitoring memory usage
diff --git a/book/intro.md b/book/intro.md
index 1a334fbb170e..077cfed30883 100644
--- a/book/intro.md
+++ b/book/intro.md
@@ -1,12 +1,11 @@
# Reth Book
-_Documentation for Reth users and developers._
+_Documentation for Reth users and developers._
[![Telegram Chat][tg-badge]][tg-url]
-Reth (short for Rust Ethereum, [pronunciation](https://twitter.com/kelvinfichter/status/1597653609411268608)) is an **Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient.**
+Reth (short for Rust Ethereum, [pronunciation](https://twitter.com/kelvinfichter/status/1597653609411268608)) is an **Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient.**
-Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime servi
-ces. We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities.
+Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime services. We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities.
@@ -56,12 +55,12 @@ We want to solve for node operators that care about fast historical queries, but
We also want to support teams and individuals who want both sync from genesis and via "fast sync".
-We envision that Reth will be configurable enough for the tradeoffs that each team faces.
+We envision that Reth will be configurable enough for the tradeoffs that each team faces.
## Who is this for?
Reth is a new Ethereum full node that allows users to sync and interact with the entire blockchain, including its historical state if in archive mode.
-- Full node: It can be used as a full node, which stores and processes the entire blockchain, validates blocks and transactions, and participates in the consensus process.
+- Full node: It can be used as a full node, which stores and processes the entire blockchain, validates blocks and transactions, and participates in the consensus process.
- Archive node: It can also be used as an archive node, which stores the entire history of the blockchain and is useful for applications that need access to historical data.
As a data engineer/analyst, or as a data indexer, you'll want to use Archive mode. For all other use cases where historical access is not needed, you can use Full mode.
@@ -76,7 +75,10 @@ Reth implements the specification of Ethereum as defined in the [ethereum/execut
1. We operate multiple nodes at the tip of Ethereum mainnet and various testnets.
1. We extensively unit test, fuzz test and document all our code, while also restricting PRs with aggressive lint rules.
-We intend to also audit / fuzz the EVM & parts of the codebase. Please reach out if you're interested in collaborating on securing this codebase.
+We have completed an audit of the [Reth v1.0.0-rc.2](https://github.com/paradigmxyz/reth/releases/tag/v1.0.0-rc.2) with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](https://github.com/paradigmxyz/reth/blob/main/audit/sigma_prime_audit_v2.pdf).
+
+[Revm](https://github.com/bluealloy/revm) (the EVM used in Reth) underwent an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon.
+
## Sections
diff --git a/book/run/optimism.md b/book/run/optimism.md
index a44d0b603fc2..3e4c76b7c763 100644
--- a/book/run/optimism.md
+++ b/book/run/optimism.md
@@ -83,10 +83,12 @@ op-node \
--l2.jwt-secret=/path/to/jwt.hex \
--rpc.addr=0.0.0.0 \
--rpc.port=7000 \
- --l1.trustrpc \
--l1.beacon=
+ --syncmode=execution-layer
```
+Consider adding the `--l1.trustrpc` flag to improve performance if the connection to L1 is over localhost.
+
If you opted to build the `op-node` with the `rethdb` build tag, this feature can be enabled by appending one extra flag to the `op-node` invocation:
> Note, the `reth_db_path` is the path to the `db` folder inside of the reth datadir, not the `mdbx.dat` file itself. This can be fetched from `op-reth db path [--chain ]`, or if you are using a custom datadir location via the `--datadir` flag,
@@ -101,7 +103,7 @@ op-node \
[l1-el-spec]: https://github.com/ethereum/execution-specs
[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md
[op-geth-forkdiff]: https://op-geth.optimism.io
-[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/introduction.md#sequencers
+[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background.md#sequencers
[op-stack-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs
[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md
[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md
diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md
index c85ff7d54786..3a987e52c73a 100644
--- a/book/run/private-testnet.md
+++ b/book/run/private-testnet.md
@@ -2,105 +2,96 @@
For those who need a private testnet to validate functionality or scale with Reth.
## Using Docker locally
-This guide uses [Kurtosis' ethereum-package](https://github.com/kurtosis-tech/ethereum-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine.
+This guide uses [Kurtosis' ethereum-package](https://github.com/ethpandaops/ethereum-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine.
* Go [here](https://docs.kurtosis.com/install/) to install Kurtosis
* Go [here](https://docs.docker.com/get-docker/) to install Docker
-The [`ethereum-package`](https://github.com/kurtosis-tech/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations.
+The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations.
-To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/kurtosis-tech/ethereum-package#configuration).
+To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/ethpandaops/ethereum-package#configuration).
-Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/). Read more about how the `ethereum-package` works by going [here](https://github.com/kurtosis-tech/ethereum-package/).
+Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/). Read more about how the `ethereum-package` works by going [here](https://github.com/ethpandaops/ethereum-package/).
### Step 1: Define the parameters and shape of your private network
-First, in your home directory, create a file with the name `network_params.json` with the following contents:
-```json
-{
- "participants": [
- {
- "el_type": "reth",
- "el_image": "ghcr.io/paradigmxyz/reth",
- "cl_type": "lighthouse",
- "cl_image": "sigp/lighthouse:latest",
- "count": 1
- },
- {
- "el_type": "reth",
- "el_image": "ghcr.io/paradigmxyz/reth",
- "cl_type": "teku",
- "cl_image": "consensys/teku:latest",
- "count": 1
- }
- ],
- "launch_additional_services": false
-}
+First, in your home directory, create a file with the name `network_params.yaml` with the following contents:
+```yaml
+participants:
+ - el_type: reth
+ el_image: ghcr.io/paradigmxyz/reth
+ cl_type: lighthouse
+ cl_image: sigp/lighthouse:latest
+ - el_type: reth
+ el_image: ghcr.io/paradigmxyz/reth
+ cl_type: teku
+ cl_image: consensys/teku:latest
```
> [!TIP]
-> If you would like to use a modified reth node, you can build an image locally with a custom tag. The tag can then be used in the `el_image` field in the `network_params.json` file.
+> If you would like to use a modified reth node, you can build an image locally with a custom tag. The tag can then be used in the `el_image` field in the `network_params.yaml` file.
### Step 2: Spin up your network
Next, run the following command from your command line:
```bash
-kurtosis run github.com/kurtosis-tech/ethereum-package --args-file ~/network_params.json
+kurtosis run github.com/ethpandaops/ethereum-package --args-file ~/network_params.yaml --image-download always
```
Kurtosis will spin up an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/) (i.e an ephemeral, isolated environment) and begin to configure and instantiate the nodes in your network. In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output:
```console
-INFO[2023-08-21T18:22:18-04:00] ====================================================
-INFO[2023-08-21T18:22:18-04:00] || Created enclave: silky-swamp ||
-INFO[2023-08-21T18:22:18-04:00] ====================================================
-Name: silky-swamp
-UUID: 3df730c66123
+INFO[2024-07-09T12:01:35+02:00] ========================================================
+INFO[2024-07-09T12:01:35+02:00] || Created enclave: silent-mountain ||
+INFO[2024-07-09T12:01:35+02:00] ========================================================
+Name: silent-mountain
+UUID: cb5d0a7d0e7c
Status: RUNNING
-Creation Time: Mon, 21 Aug 2023 18:21:32 EDT
+Creation Time: Tue, 09 Jul 2024 12:00:03 CEST
+Flags:
========================================= Files Artifacts =========================================
UUID Name
-c168ec4468f6 1-lighthouse-reth-0-63
-61f821e2cfd5 2-teku-reth-64-127
-e6f94fdac1b8 cl-genesis-data
-e6b57828d099 el-genesis-data
-1fb632573a2e genesis-generation-config-cl
-b8917e497980 genesis-generation-config-el
-6fd8c5be336a geth-prefunded-keys
-6ab83723b4bd prysm-password
+414a075a37aa 1-lighthouse-reth-0-63-0
+34d0b9ff906b 2-teku-reth-64-127-0
+dffa1bcd1da1 el_cl_genesis_data
+fdb202429b26 final-genesis-timestamp
+da0d9d24b340 genesis-el-cl-env-file
+55c46a6555ad genesis_validators_root
+ba79dbd109dd jwt_file
+04948fd8b1e3 keymanager_file
+538211b6b7d7 prysm-password
+ed75fe7d5293 validator-ranges
========================================== User Services ==========================================
-UUID Name Ports Status
-95386198d3f9 cl-1-lighthouse-reth http: 4000/tcp -> http://127.0.0.1:64947 RUNNING
- metrics: 5054/tcp -> http://127.0.0.1:64948
- tcp-discovery: 9000/tcp -> 127.0.0.1:64949
- udp-discovery: 9000/udp -> 127.0.0.1:60303
-5f5cc4cf639a cl-1-lighthouse-reth-validator http: 5042/tcp -> 127.0.0.1:64950 RUNNING
- metrics: 5064/tcp -> http://127.0.0.1:64951
-27e1cfaddc72 cl-2-teku-reth http: 4000/tcp -> 127.0.0.1:64954 RUNNING
- metrics: 8008/tcp -> 127.0.0.1:64952
- tcp-discovery: 9000/tcp -> 127.0.0.1:64953
- udp-discovery: 9000/udp -> 127.0.0.1:53749
-b454497fbec8 el-1-reth-lighthouse engine-rpc: 8551/tcp -> 127.0.0.1:64941 RUNNING
- metrics: 9001/tcp -> 127.0.0.1:64937
- rpc: 8545/tcp -> 127.0.0.1:64939
- tcp-discovery: 30303/tcp -> 127.0.0.1:64938
- udp-discovery: 30303/udp -> 127.0.0.1:55861
- ws: 8546/tcp -> 127.0.0.1:64940
-03a2ef13c99b el-2-reth-teku engine-rpc: 8551/tcp -> 127.0.0.1:64945 RUNNING
- metrics: 9001/tcp -> 127.0.0.1:64946
- rpc: 8545/tcp -> 127.0.0.1:64943
- tcp-discovery: 30303/tcp -> 127.0.0.1:64942
- udp-discovery: 30303/udp -> 127.0.0.1:64186
- ws: 8546/tcp -> 127.0.0.1:64944
-5c199b334236 prelaunch-data-generator-cl-genesis-data RUNNING
-46829c4bd8b0 prelaunch-data-generator-el-genesis-data RUNNING
+UUID Name Ports Status
+0853f809c300 cl-1-lighthouse-reth http: 4000/tcp -> http://127.0.0.1:32811 RUNNING
+ metrics: 5054/tcp -> http://127.0.0.1:32812
+ tcp-discovery: 9000/tcp -> 127.0.0.1:32813
+ udp-discovery: 9000/udp -> 127.0.0.1:32776
+f81cd467efe3 cl-2-teku-reth http: 4000/tcp -> http://127.0.0.1:32814 RUNNING
+ metrics: 8008/tcp -> http://127.0.0.1:32815
+ tcp-discovery: 9000/tcp -> 127.0.0.1:32816
+ udp-discovery: 9000/udp -> 127.0.0.1:32777
+f21d5ca3061f el-1-reth-lighthouse engine-rpc: 8551/tcp -> 127.0.0.1:32803 RUNNING
+ metrics: 9001/tcp -> http://127.0.0.1:32804
+ rpc: 8545/tcp -> 127.0.0.1:32801
+ tcp-discovery: 30303/tcp -> 127.0.0.1:32805
+ udp-discovery: 30303/udp -> 127.0.0.1:32774
+ ws: 8546/tcp -> 127.0.0.1:32802
+e234b3b4a440 el-2-reth-teku engine-rpc: 8551/tcp -> 127.0.0.1:32808 RUNNING
+ metrics: 9001/tcp -> http://127.0.0.1:32809
+ rpc: 8545/tcp -> 127.0.0.1:32806
+ tcp-discovery: 30303/tcp -> 127.0.0.1:32810
+ udp-discovery: 30303/udp -> 127.0.0.1:32775
+ ws: 8546/tcp -> 127.0.0.1:32807
+92dd5a0599dc validator-key-generation-cl-validator-keystore RUNNING
+f0a7d5343346 vc-1-reth-lighthouse metrics: 8080/tcp -> http://127.0.0.1:32817 RUNNING
```
-Great! You now have a private network with 2 full Ethereum nodes on your local machine over Docker - one that is a Reth/Lighthouse pair and another that is Reth/Teku. Check out the [Kurtosis docs](https://docs.kurtosis.com/cli) to learn about the various ways you can interact with and inspect your network.
+Great! You now have a private network with 2 full Ethereum nodes on your local machine over Docker - one that is a Reth/Lighthouse pair and another that is Reth/Teku. Check out the [Kurtosis docs](https://docs.kurtosis.com/cli) to learn about the various ways you can interact with and inspect your network.
## Using Kurtosis on Kubernetes
Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker or Kubernetes, locally or on remote infrastructure. For use cases that require a larger scale, Kurtosis can be deployed on Kubernetes by following these docs [here](https://docs.kurtosis.com/k8s/).
## Running the network with additional services
-The [`ethereum-package`](https://github.com/kurtosis-tech/ethereum-package) comes with many optional flags and arguments you can enable for your private network. Some include:
+The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) comes with many optional flags and arguments you can enable for your private network. Some include:
- A Grafana + Prometheus instance
- A transaction spammer called [`tx-fuzz`](https://github.com/MariusVanDerWijden/tx-fuzz)
- [A network metrics collector](https://github.com/dapplion/beacon-metrics-gazer)
diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md
index d9df4139bb10..19d57e6398c9 100644
--- a/book/run/sync-op-mainnet.md
+++ b/book/run/sync-op-mainnet.md
@@ -12,9 +12,14 @@ Importing OP mainnet Bedrock datadir requires exported data:
## Manual Export Steps
-See .
+The `op-geth` Bedrock datadir can be downloaded from .
-Output from running the command to export state, can also be downloaded from .
+To export the OVM chain from `op-geth`, clone the `testinprod-io/op-geth` repo and checkout
+. Commands to export blocks, receipts and state dump can be
+found in `op-geth/migrate.sh`.
+
+Output from running the command to export state, can also be downloaded from
+.
## Manual Import Steps
diff --git a/book/run/transactions.md b/book/run/transactions.md
index 65aa979e238e..61327b57300a 100644
--- a/book/run/transactions.md
+++ b/book/run/transactions.md
@@ -1,6 +1,6 @@
# Transaction types
-Over time, the Ethereum network has undergone various upgrades and improvements to enhance transaction efficiency, security, and user experience. Three significant transaction types that have evolved are:
+Over time, the Ethereum network has undergone various upgrades and improvements to enhance transaction efficiency, security, and user experience. Four significant transaction types that have evolved are:
- Legacy Transactions,
- EIP-2930 Transactions,
@@ -46,4 +46,4 @@ Alongside the legacy parameters & parameters from EIP-1559, the EIP-4844 transac
- `max_fee_per_blob_gas`, The maximum total fee per gas the sender is willing to pay for blob gas in wei
- `blob_versioned_hashes`, List of versioned blob hashes associated with the transaction's EIP-4844 data blobs.
-The actual blob fee is deducted from the sender balance before transaction execution and burned, and is not refunded in case of transaction failure.
\ No newline at end of file
+The actual blob fee is deducted from the sender balance before transaction execution and burned, and is not refunded in case of transaction failure.
diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md
index 68a7cc29ea85..7368b6631abb 100644
--- a/book/run/troubleshooting.md
+++ b/book/run/troubleshooting.md
@@ -109,3 +109,71 @@ pthread_mutex_lock.c:438: __pthread_mutex_lock_full: Assertion `e != ESRCH || !r
If you are using Docker, a possible solution is to run all database-accessing containers with `--pid=host` flag.
For more information, check out the `Containers` section in the [libmdbx README](https://github.com/erthink/libmdbx#containers).
+
+## Hardware Performance Testing
+
+If you're experiencing degraded performance, it may be related to hardware issues. Below are some tools and tests you can run to evaluate your hardware performance.
+
+If your hardware performance is significantly lower than these reference numbers, it may explain degraded node performance. Consider upgrading your hardware or investigating potential issues with your current setup.
+
+### Disk Speed Testing with [IOzone](https://linux.die.net/man/1/iozone)
+
+1. Test disk speed:
+ ```bash
+ iozone -e -t1 -i0 -i2 -r1k -s1g /tmp
+ ```
+ Reference numbers (on Latitude c3.large.x86):
+
+ ```console
+ Children see throughput for 1 initial writers = 907733.81 kB/sec
+ Parent sees throughput for 1 initial writers = 907239.68 kB/sec
+ Children see throughput for 1 rewriters = 1765222.62 kB/sec
+ Parent sees throughput for 1 rewriters = 1763433.35 kB/sec
+ Children see throughput for 1 random readers = 1557497.38 kB/sec
+ Parent sees throughput for 1 random readers = 1554846.58 kB/sec
+ Children see throughput for 1 random writers = 984428.69 kB/sec
+ Parent sees throughput for 1 random writers = 983476.67 kB/sec
+ ```
+2. Test disk speed with memory-mapped files:
+ ```bash
+ iozone -B -G -e -t1 -i0 -i2 -r1k -s1g /tmp
+ ```
+ Reference numbers (on Latitude c3.large.x86):
+
+ ```console
+ Children see throughput for 1 initial writers = 56471.06 kB/sec
+ Parent sees throughput for 1 initial writers = 56365.14 kB/sec
+ Children see throughput for 1 rewriters = 241650.69 kB/sec
+ Parent sees throughput for 1 rewriters = 239067.96 kB/sec
+ Children see throughput for 1 random readers = 6833161.00 kB/sec
+ Parent sees throughput for 1 random readers = 5597659.65 kB/sec
+ Children see throughput for 1 random writers = 220248.53 kB/sec
+ Parent sees throughput for 1 random writers = 219112.26 kB/sec
+ ```
+
+### RAM Speed and Health Testing
+
+1. Check RAM speed with [lshw](https://linux.die.net/man/1/lshw):
+ ```bash
+ sudo lshw -short -C memory
+ ```
+ Look for the frequency in the output. Reference output:
+
+ ```console
+ H/W path Device Class Description
+ ================================================================
+ /0/24/0 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns)
+ /0/24/1 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns)
+ ...
+ ```
+
+2. Test RAM health with [memtester](https://linux.die.net/man/8/memtester):
+ ```bash
+ sudo memtester 10G
+ ```
+ This will take a while. You can test with a smaller amount first:
+
+ ```bash
+ sudo memtester 1G 1
+ ```
+ All checks should report "ok".
diff --git a/clippy.toml b/clippy.toml
index 7e606c3f1f9f..865dfc7c95a5 100644
--- a/clippy.toml
+++ b/clippy.toml
@@ -1,2 +1,2 @@
-msrv = "1.76"
+msrv = "1.79"
too-large-for-stack = 128
diff --git a/codecov.yml b/codecov.yml
deleted file mode 100644
index 5bd75590b504..000000000000
--- a/codecov.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-coverage:
- status:
- patch: off
- project:
- default:
- threshold: null
- informational: true
-github_checks:
- annotations: false
-comment:
- layout: "reach, files, flags, components"
- require_changes: true
-component_management:
- individual_components:
- - component_id: reth_binary
- name: reth binary
- paths:
- - bin/**
- - crates/config/**
- - crates/metrics/**
- - crates/tracing/**
- - component_id: blockchain_tree
- name: blockchain tree
- paths:
- - crates/blockchain-tree/**
- - component_id: staged_sync
- name: pipeline
- paths:
- - crates/stages/**
- - component_id: storage
- name: storage (db)
- paths:
- - crates/storage/**
- - component_id: trie
- name: trie
- paths:
- - crates/trie/**
- - component_id: txpool
- name: txpool
- paths:
- - crates/transaction-pool/**
- - component_id: networking
- name: networking
- paths:
- - crates/net/**
- - component_id: rpc
- name: rpc
- paths:
- - crates/rpc/**
- - component_id: consensus
- name: consensus
- paths:
- - crates/consensus/**
- - component_id: revm
- name: revm
- paths:
- - crates/revm/**
- - component_id: builder
- name: payload builder
- paths:
- - crates/payload/**
- - component_id: primitives
- name: primitives
- paths:
- - crates/primitives/**
- - crates/tasks/**
- - crates/rlp/**
- - crates/interfaces/**
\ No newline at end of file
diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml
index 64796939612d..b3679677a13c 100644
--- a/crates/blockchain-tree/Cargo.toml
+++ b/crates/blockchain-tree/Cargo.toml
@@ -21,6 +21,7 @@ reth-db-api.workspace = true
reth-evm.workspace = true
reth-revm.workspace = true
reth-provider.workspace = true
+reth-execution-types.workspace = true
reth-prune-types.workspace = true
reth-stages-api.workspace = true
reth-trie = { workspace = true, features = ["metrics"] }
@@ -39,9 +40,10 @@ metrics.workspace = true
# misc
aquamarine.workspace = true
-linked_hash_set = "0.1.4"
+linked_hash_set.workspace = true
[dev-dependencies]
+reth-chainspec.workspace = true
reth-db = { workspace = true, features = ["test-utils"] }
reth-primitives = { workspace = true, features = ["test-utils"] }
reth-provider = { workspace = true, features = ["test-utils"] }
@@ -52,6 +54,7 @@ reth-revm.workspace = true
reth-evm-ethereum.workspace = true
parking_lot.workspace = true
assert_matches.workspace = true
+alloy-genesis.workspace = true
[features]
test-utils = []
diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs
index 420912b4088c..41c71a4c475c 100644
--- a/crates/blockchain-tree/src/block_indices.rs
+++ b/crates/blockchain-tree/src/block_indices.rs
@@ -3,8 +3,8 @@
use super::state::BlockchainId;
use crate::canonical_chain::CanonicalChain;
use linked_hash_set::LinkedHashSet;
+use reth_execution_types::Chain;
use reth_primitives::{BlockHash, BlockNumHash, BlockNumber, SealedBlockWithSenders};
-use reth_provider::Chain;
use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet};
/// Internal indices of the blocks and chains.
diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs
index 84e7971e06c5..73bdbf2906ca 100644
--- a/crates/blockchain-tree/src/blockchain_tree.rs
+++ b/crates/blockchain-tree/src/blockchain_tree.rs
@@ -13,15 +13,15 @@ use reth_consensus::{Consensus, ConsensusError};
use reth_db_api::database::Database;
use reth_evm::execute::BlockExecutorProvider;
use reth_execution_errors::{BlockExecutionError, BlockValidationError};
+use reth_execution_types::{Chain, ExecutionOutcome};
use reth_primitives::{
- BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, Receipt, SealedBlock,
- SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256,
+ BlockHash, BlockNumHash, BlockNumber, EthereumHardfork, ForkBlock, GotExpected, Receipt,
+ SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256,
};
use reth_provider::{
BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification,
- CanonStateNotificationSender, CanonStateNotifications, Chain, ChainSpecProvider, ChainSplit,
- ChainSplitTarget, DisplayBlocksChain, ExecutionOutcome, HeaderProvider, ProviderError,
- StaticFileProviderFactory,
+ CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit,
+ ChainSplitTarget, DisplayBlocksChain, HeaderProvider, ProviderError, StaticFileProviderFactory,
};
use reth_prune_types::PruneModes;
use reth_stages_api::{MetricEvent, MetricEventsSender};
@@ -65,8 +65,6 @@ pub struct BlockchainTree<DB, E> {
 externals: TreeExternals<DB, E>,
/// Tree configuration
config: BlockchainTreeConfig,
- /// Prune modes.
- prune_modes: Option<PruneModes>,
/// Broadcast channel for canon state changes notifications.
canon_state_notification_sender: CanonStateNotificationSender,
/// Metrics for sync stages.
@@ -115,9 +113,9 @@ where
/// storage space efficiently. It's important to validate this configuration to ensure it does
/// not lead to unintended data loss.
pub fn new(
- externals: TreeExternals<DB, E>,
+ mut externals: TreeExternals<DB, E>,
config: BlockchainTreeConfig,
- prune_modes: Option<PruneModes>,
+ prune_modes: PruneModes,
) -> ProviderResult<Self> {
let max_reorg_depth = config.max_reorg_depth() as usize;
// The size of the broadcast is twice the maximum reorg depth, because at maximum reorg
@@ -125,6 +123,9 @@ where
let (canon_state_notification_sender, _receiver) =
tokio::sync::broadcast::channel(max_reorg_depth * 2);
+ // Set the prune modes argument, on the provider
+ externals.provider_factory = externals.provider_factory.with_prune_modes(prune_modes);
+
let last_canonical_hashes =
externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?;
@@ -138,7 +139,6 @@ where
config.max_unconnected_blocks(),
),
config,
- prune_modes,
canon_state_notification_sender,
sync_metrics_tx: None,
metrics: Default::default(),
@@ -402,7 +402,7 @@ where
.externals
.provider_factory
.chain_spec()
- .fork(Hardfork::Paris)
+ .fork(EthereumHardfork::Paris)
.active_at_ttd(parent_td, U256::ZERO)
{
return Err(BlockExecutionError::Validation(BlockValidationError::BlockPreMerge {
@@ -1043,7 +1043,7 @@ where
.externals
.provider_factory
.chain_spec()
- .fork(Hardfork::Paris)
+ .fork(EthereumHardfork::Paris)
.active_at_ttd(td, U256::ZERO)
{
return Err(CanonicalError::from(BlockValidationError::BlockPreMerge {
@@ -1063,9 +1063,7 @@ where
};
// we are splitting chain at the block hash that we want to make canonical
- let Some(canonical) =
- self.remove_and_split_chain(chain_id, ChainSplitTarget::Hash(block_hash))
- else {
+ let Some(canonical) = self.remove_and_split_chain(chain_id, block_hash.into()) else {
debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present");
return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency {
chain_id: chain_id.into(),
@@ -1200,7 +1198,7 @@ where
}
});
- durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChilds);
+ durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChildren);
// Send notification about new canonical chain and return outcome of canonicalization.
let outcome = CanonicalOutcome::Committed { head: chain_notification.tip().header.clone() };
@@ -1260,7 +1258,6 @@ where
state,
hashed_state,
trie_updates,
- self.prune_modes.as_ref(),
)
.map_err(|e| CanonicalError::CanonicalCommit(e.to_string()))?;
@@ -1369,8 +1366,10 @@ where
#[cfg(test)]
mod tests {
use super::*;
+ use alloy_genesis::{Genesis, GenesisAccount};
use assert_matches::assert_matches;
use linked_hash_set::LinkedHashSet;
+ use reth_chainspec::{ChainSpecBuilder, MAINNET};
use reth_consensus::test_utils::TestConsensus;
use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv};
use reth_db_api::transaction::DbTxMut;
@@ -1383,18 +1382,17 @@ mod tests {
use reth_primitives::{
constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH, ETHEREUM_BLOCK_GAS_LIMIT},
keccak256,
- proofs::{calculate_transaction_root, state_root_unhashed},
+ proofs::calculate_transaction_root,
revm_primitives::AccountInfo,
- Account, Address, ChainSpecBuilder, Genesis, GenesisAccount, Header, Signature,
- Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256,
- MAINNET,
+ Account, Address, Header, Signature, Transaction, TransactionSigned,
+ TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256,
};
use reth_provider::{
test_utils::{blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec},
ProviderFactory,
};
use reth_stages_api::StageCheckpoint;
- use reth_trie::StateRoot;
+ use reth_trie::{root::state_root_unhashed, StateRoot};
use std::collections::HashMap;
fn setup_externals(
@@ -1425,7 +1423,6 @@ mod tests {
provider
.insert_historical_block(
genesis.try_seal_with_senders().expect("invalid tx signature in genesis"),
- None,
)
.unwrap();
@@ -1546,7 +1543,6 @@ mod tests {
SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default())
.try_seal_with_senders()
.unwrap(),
- None,
)
.unwrap();
let account = Account { balance: initial_signer_balance, ..Default::default() };
@@ -1648,7 +1644,7 @@ mod tests {
let mut tree = BlockchainTree::new(
TreeExternals::new(provider_factory, consensus, executor_provider),
BlockchainTreeConfig::default(),
- None,
+ PruneModes::default(),
)
.expect("failed to create tree");
@@ -1728,7 +1724,8 @@ mod tests {
// make tree
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
- let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree");
+ let mut tree = BlockchainTree::new(externals, config, PruneModes::default())
+ .expect("failed to create tree");
// genesis block 10 is already canonical
tree.make_canonical(B256::ZERO).unwrap();
@@ -1804,7 +1801,8 @@ mod tests {
// make tree
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
- let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree");
+ let mut tree = BlockchainTree::new(externals, config, PruneModes::default())
+ .expect("failed to create tree");
// genesis block 10 is already canonical
tree.make_canonical(B256::ZERO).unwrap();
@@ -1868,7 +1866,7 @@ mod tests {
);
let provider = tree.externals.provider_factory.provider().unwrap();
- let prefix_sets = exec5.hash_state_slow().construct_prefix_sets();
+ let prefix_sets = exec5.hash_state_slow().construct_prefix_sets().freeze();
let state_root =
StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap();
assert_eq!(state_root, block5.state_root);
@@ -1889,7 +1887,8 @@ mod tests {
// make tree
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
- let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree");
+ let mut tree = BlockchainTree::new(externals, config, PruneModes::default())
+ .expect("failed to create tree");
// genesis block 10 is already canonical
tree.make_canonical(B256::ZERO).unwrap();
@@ -1987,7 +1986,8 @@ mod tests {
// make tree
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
- let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree");
+ let mut tree = BlockchainTree::new(externals, config, PruneModes::default())
+ .expect("failed to create tree");
let mut canon_notif = tree.subscribe_canon_state();
// genesis block 10 is already canonical
@@ -2380,7 +2380,8 @@ mod tests {
// make tree
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
- let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree");
+ let mut tree = BlockchainTree::new(externals, config, PruneModes::default())
+ .expect("failed to create tree");
assert_eq!(
tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(),
@@ -2400,8 +2401,8 @@ mod tests {
tree.make_canonical(block2.hash()).unwrap();
// restart
- let mut tree =
- BlockchainTree::new(cloned_externals_1, config, None).expect("failed to create tree");
+ let mut tree = BlockchainTree::new(cloned_externals_1, config, PruneModes::default())
+ .expect("failed to create tree");
assert_eq!(tree.block_indices().last_finalized_block(), 0);
let mut block1a = block1;
@@ -2417,8 +2418,8 @@ mod tests {
tree.finalize_block(block1a.number).unwrap();
// restart
- let tree =
- BlockchainTree::new(cloned_externals_2, config, None).expect("failed to create tree");
+ let tree = BlockchainTree::new(cloned_externals_2, config, PruneModes::default())
+ .expect("failed to create tree");
assert_eq!(tree.block_indices().last_finalized_block(), block1a.number);
}
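
Taken together, the test updates above all reflect one API change: `BlockchainTree::new` now takes `PruneModes` by value rather than `Option<PruneModes>`, with `PruneModes::default()` standing in for the old `None` (no pruning). A minimal sketch of the new call shape, assuming `externals` is built as in `setup_externals` and that `PruneModes` is imported from its usual `reth_prune_types` path:

```rust
use reth_prune_types::PruneModes; // assumed import path

// `PruneModes::default()` replaces the old `None` argument and means
// "prune nothing"; `externals` and `config` are built as in the tests above.
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
let tree = BlockchainTree::new(externals, config, PruneModes::default())
    .expect("failed to create tree");
```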
diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs
index ab91ee5476a1..8a9893a1807f 100644
--- a/crates/blockchain-tree/src/canonical_chain.rs
+++ b/crates/blockchain-tree/src/canonical_chain.rs
@@ -12,7 +12,7 @@ pub(crate) struct CanonicalChain {
}
impl CanonicalChain {
- pub(crate) fn new(chain: BTreeMap<BlockNumber, BlockHash>) -> Self {
+ pub(crate) const fn new(chain: BTreeMap<BlockNumber, BlockHash>) -> Self {
Self { chain }
}
diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs
index d53009b76609..1e6be8353497 100644
--- a/crates/blockchain-tree/src/chain.rs
+++ b/crates/blockchain-tree/src/chain.rs
@@ -13,12 +13,13 @@ use reth_consensus::{Consensus, ConsensusError, PostExecutionInput};
use reth_db_api::database::Database;
use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor};
use reth_execution_errors::BlockExecutionError;
+use reth_execution_types::{Chain, ExecutionOutcome};
use reth_primitives::{
BlockHash, BlockNumber, ForkBlock, GotExpected, SealedBlockWithSenders, SealedHeader, U256,
};
use reth_provider::{
providers::{BundleStateProvider, ConsistentDbView},
- Chain, ExecutionOutcome, FullExecutionDataProvider, ProviderError, StateRootProvider,
+ FullExecutionDataProvider, ProviderError, StateRootProvider,
};
use reth_revm::database::StateProviderDatabase;
use reth_trie::updates::TrieUpdates;
diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs
index 735f1db96f1c..5d44a6391178 100644
--- a/crates/blockchain-tree/src/metrics.rs
+++ b/crates/blockchain-tree/src/metrics.rs
@@ -89,7 +89,7 @@ pub(crate) enum MakeCanonicalAction {
/// Inserting an old canonical chain.
InsertOldCanonicalChain,
/// Clearing trie updates of other children chains after fork choice update.
- ClearTrieUpdatesForOtherChilds,
+ ClearTrieUpdatesForOtherChildren,
}
/// Canonicalization metrics
@@ -118,7 +118,7 @@ struct MakeCanonicalMetrics {
insert_old_canonical_chain: Histogram,
/// Duration of the clear trie updates of other children chains after fork choice update
/// action.
- clear_trie_updates_for_other_childs: Histogram,
+ clear_trie_updates_for_other_children: Histogram,
}
impl MakeCanonicalMetrics {
@@ -145,8 +145,8 @@ impl MakeCanonicalMetrics {
MakeCanonicalAction::InsertOldCanonicalChain => {
self.insert_old_canonical_chain.record(duration)
}
- MakeCanonicalAction::ClearTrieUpdatesForOtherChilds => {
- self.clear_trie_updates_for_other_childs.record(duration)
+ MakeCanonicalAction::ClearTrieUpdatesForOtherChildren => {
+ self.clear_trie_updates_for_other_children.record(duration)
}
}
}
diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs
index 3ff14ca04f06..d92131dc8ac2 100644
--- a/crates/blockchain-tree/src/noop.rs
+++ b/crates/blockchain-tree/src/noop.rs
@@ -27,7 +27,7 @@ pub struct NoopBlockchainTree {
impl NoopBlockchainTree {
/// Create a new `NoopBlockchainTree` with a canon state notification sender.
- pub fn with_canon_state_notifications(
+ pub const fn with_canon_state_notifications(
canon_state_notification_sender: CanonStateNotificationSender,
) -> Self {
Self { canon_state_notification_sender: Some(canon_state_notification_sender) }
diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml
new file mode 100644
index 000000000000..e4574acdace9
--- /dev/null
+++ b/crates/chainspec/Cargo.toml
@@ -0,0 +1,62 @@
+[package]
+name = "reth-chainspec"
+version.workspace = true
+edition.workspace = true
+homepage.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-ethereum-forks.workspace = true
+reth-network-peers.workspace = true
+reth-trie-common.workspace = true
+reth-primitives-traits.workspace = true
+
+# ethereum
+alloy-chains = { workspace = true, features = ["serde", "rlp"] }
+alloy-eips = { workspace = true, features = ["serde"] }
+alloy-genesis.workspace = true
+alloy-primitives = { workspace = true, features = ["rand", "rlp"] }
+alloy-trie.workspace = true
+
+# op
+op-alloy-rpc-types = { workspace = true, optional = true }
+
+
+# misc
+once_cell.workspace = true
+serde = { workspace = true, optional = true }
+serde_json.workspace = true
+derive_more.workspace = true
+
+[dev-dependencies]
+# eth
+nybbles = { workspace = true, features = ["arbitrary"] }
+alloy-trie = { workspace = true, features = ["arbitrary"] }
+alloy-eips = { workspace = true, features = ["arbitrary"] }
+alloy-rlp = { workspace = true, features = ["arrayvec"] }
+alloy-genesis.workspace = true
+reth-rpc-types.workspace = true
+rand.workspace = true
+
+# op
+op-alloy-rpc-types.workspace = true
+
+[features]
+default = ["std"]
+optimism = [
+ "reth-ethereum-forks/optimism",
+ "serde",
+ "dep:op-alloy-rpc-types",
+]
+std = []
+arbitrary = [
+ "alloy-chains/arbitrary"
+]
+
+
diff --git a/crates/primitives/res/genesis/base.json b/crates/chainspec/res/genesis/base.json
similarity index 100%
rename from crates/primitives/res/genesis/base.json
rename to crates/chainspec/res/genesis/base.json
diff --git a/crates/primitives/res/genesis/dev.json b/crates/chainspec/res/genesis/dev.json
similarity index 100%
rename from crates/primitives/res/genesis/dev.json
rename to crates/chainspec/res/genesis/dev.json
diff --git a/crates/primitives/res/genesis/goerli.json b/crates/chainspec/res/genesis/goerli.json
similarity index 100%
rename from crates/primitives/res/genesis/goerli.json
rename to crates/chainspec/res/genesis/goerli.json
diff --git a/crates/primitives/res/genesis/holesky.json b/crates/chainspec/res/genesis/holesky.json
similarity index 100%
rename from crates/primitives/res/genesis/holesky.json
rename to crates/chainspec/res/genesis/holesky.json
diff --git a/crates/primitives/res/genesis/mainnet.json b/crates/chainspec/res/genesis/mainnet.json
similarity index 100%
rename from crates/primitives/res/genesis/mainnet.json
rename to crates/chainspec/res/genesis/mainnet.json
diff --git a/crates/primitives/res/genesis/optimism.json b/crates/chainspec/res/genesis/optimism.json
similarity index 100%
rename from crates/primitives/res/genesis/optimism.json
rename to crates/chainspec/res/genesis/optimism.json
diff --git a/crates/primitives/res/genesis/sepolia.json b/crates/chainspec/res/genesis/sepolia.json
similarity index 100%
rename from crates/primitives/res/genesis/sepolia.json
rename to crates/chainspec/res/genesis/sepolia.json
diff --git a/crates/primitives/res/genesis/sepolia_base.json b/crates/chainspec/res/genesis/sepolia_base.json
similarity index 100%
rename from crates/primitives/res/genesis/sepolia_base.json
rename to crates/chainspec/res/genesis/sepolia_base.json
diff --git a/crates/primitives/res/genesis/sepolia_op.json b/crates/chainspec/res/genesis/sepolia_op.json
similarity index 100%
rename from crates/primitives/res/genesis/sepolia_op.json
rename to crates/chainspec/res/genesis/sepolia_op.json
diff --git a/crates/primitives/res/genesis/weave_wm_testnet_v0.json b/crates/chainspec/res/genesis/weave_wm_testnet_v0.json
similarity index 100%
rename from crates/primitives/res/genesis/weave_wm_testnet_v0.json
rename to crates/chainspec/res/genesis/weave_wm_testnet_v0.json
diff --git a/crates/chainspec/src/constants/mod.rs b/crates/chainspec/src/constants/mod.rs
new file mode 100644
index 000000000000..cde927189c8b
--- /dev/null
+++ b/crates/chainspec/src/constants/mod.rs
@@ -0,0 +1,12 @@
+use crate::spec::DepositContract;
+use alloy_primitives::{address, b256};
+
+/// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa`
+pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new(
+ address!("00000000219ab540356cbb839cbe05303d7705fa"),
+ 11052984,
+ b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
+);
+
+#[cfg(feature = "optimism")]
+pub(crate) mod optimism;
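
For orientation, the three arguments to `DepositContract::new` above are read here as the contract address, its deployment block, and the deposit event log topic. `DepositContract` itself lives in this crate's `spec.rs`, so the field names below are assumptions; a hypothetical helper showing how such a triple is typically used to filter logs:

```rust
use alloy_primitives::{Address, B256};

// Hypothetical helper (not part of this diff): a log counts as a deposit if
// it was emitted by the deposit contract, with the deposit event topic, at or
// after the contract's deployment block. Field names are assumed.
fn is_deposit_log(contract: &DepositContract, emitter: Address, topic0: B256, block: u64) -> bool {
    block >= contract.block && emitter == contract.address && topic0 == contract.topic
}
```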
diff --git a/crates/chainspec/src/constants/optimism.rs b/crates/chainspec/src/constants/optimism.rs
new file mode 100644
index 000000000000..1c32df6f37ed
--- /dev/null
+++ b/crates/chainspec/src/constants/optimism.rs
@@ -0,0 +1,46 @@
+use alloy_eips::eip1559::BaseFeeParams;
+use reth_primitives_traits::constants::{
+ BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+ OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON,
+ OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR,
+ OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+ OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON,
+ OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR,
+ OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+};
+
+/// Get the base fee parameters for Base Sepolia.
+pub(crate) const BASE_SEPOLIA_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams {
+ max_change_denominator: OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR,
+ elasticity_multiplier: BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+};
+
+/// Get the base fee parameters for Base Sepolia (post Canyon).
+pub(crate) const BASE_SEPOLIA_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams {
+ max_change_denominator: OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON,
+ elasticity_multiplier: BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+};
+
+/// Get the base fee parameters for Optimism Sepolia.
+pub(crate) const OP_SEPOLIA_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams {
+ max_change_denominator: OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR,
+ elasticity_multiplier: OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+};
+
+/// Get the base fee parameters for Optimism Sepolia (post Canyon).
+pub(crate) const OP_SEPOLIA_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams {
+ max_change_denominator: OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON,
+ elasticity_multiplier: OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+};
+
+/// Get the base fee parameters for Optimism Mainnet.
+pub(crate) const OP_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams {
+ max_change_denominator: OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR,
+ elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+};
+
+/// Get the base fee parameters for Optimism Mainnet (post Canyon).
+pub(crate) const OP_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams {
+ max_change_denominator: OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON,
+ elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+};
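
Each `BaseFeeParams` pair above feeds the standard EIP-1559 update rule, and the Canyon variants differ only in a larger `max_change_denominator`, which damps how fast the base fee can move per block. A self-contained sketch of that arithmetic, for illustration only (reth's actual implementation lives elsewhere):

```rust
/// EIP-1559 base fee update, parameterized by the two knobs carried in
/// `BaseFeeParams`. Illustrative sketch, not reth's code path.
fn next_base_fee(
    parent_base_fee: u64,
    parent_gas_used: u64,
    parent_gas_limit: u64,
    max_change_denominator: u64,
    elasticity_multiplier: u64,
) -> u64 {
    // The target is a fraction of the limit; elasticity 2 targets half-full blocks.
    let gas_target = parent_gas_limit / elasticity_multiplier;
    if parent_gas_used == gas_target {
        parent_base_fee
    } else if parent_gas_used > gas_target {
        // Over target: raise by at most 1/denominator of the base fee, at least 1 wei.
        let delta = (parent_base_fee as u128 * (parent_gas_used - gas_target) as u128
            / gas_target as u128
            / max_change_denominator as u128) as u64;
        parent_base_fee + delta.max(1)
    } else {
        // Under target: lower symmetrically, saturating at zero.
        let delta = (parent_base_fee as u128 * (gas_target - parent_gas_used) as u128
            / gas_target as u128
            / max_change_denominator as u128) as u64;
        parent_base_fee.saturating_sub(delta)
    }
}
```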
diff --git a/crates/primitives/src/chain/info.rs b/crates/chainspec/src/info.rs
similarity index 86%
rename from crates/primitives/src/chain/info.rs
rename to crates/chainspec/src/info.rs
index 38b73e2768ae..6fe82d0a249b 100644
--- a/crates/primitives/src/chain/info.rs
+++ b/crates/chainspec/src/info.rs
@@ -1,4 +1,5 @@
-use crate::{BlockNumHash, BlockNumber, B256};
+use alloy_eips::BlockNumHash;
+use alloy_primitives::{BlockNumber, B256};
/// Current status of the blockchain's head.
#[derive(Default, Copy, Clone, Debug, Eq, PartialEq)]
diff --git a/crates/primitives/src/chain/mod.rs b/crates/chainspec/src/lib.rs
similarity index 70%
rename from crates/primitives/src/chain/mod.rs
rename to crates/chainspec/src/lib.rs
index 727abe038112..17f766f5b0fd 100644
--- a/crates/primitives/src/chain/mod.rs
+++ b/crates/chainspec/src/lib.rs
@@ -1,26 +1,44 @@
+//! The spec of an Ethereum network
+
+#![doc(
+ html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+#![cfg_attr(not(feature = "std"), no_std)]
+
pub use alloy_chains::{Chain, ChainKind, NamedChain};
pub use info::ChainInfo;
pub use spec::{
- AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder,
- DepositContract, DisplayHardforks, ForkBaseFeeParams, ForkCondition, DEV, GOERLI, HOLESKY,
- MAINNET, SEPOLIA,
+ BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract,
+ ForkBaseFeeParams, DEV, HOLESKY, MAINNET, SEPOLIA,
};
#[cfg(feature = "optimism")]
pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA};
-#[cfg(feature = "optimism")]
-#[cfg(test)]
-pub(crate) use spec::{OP_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS};
+#[cfg(not(feature = "std"))]
+extern crate alloc;
-// The chain spec module.
-mod spec;
-// The chain info module.
+// /// The config info module namely spec id.
+// pub mod config;
+/// The chain info module.
mod info;
+/// The chain spec module.
+mod spec;
+
+/// Chain specific constants
+pub(crate) mod constants;
+
+/// Re-export for convenience
+pub use reth_ethereum_forks::*;
+
#[cfg(test)]
mod tests {
use super::*;
- use crate::U256;
+ use alloy_primitives::U256;
use alloy_rlp::Encodable;
use std::str::FromStr;
@@ -32,8 +50,8 @@ mod tests {
#[test]
fn test_named_id() {
- let chain = Chain::from_named(NamedChain::Goerli);
- assert_eq!(chain.id(), 5);
+ let chain = Chain::from_named(NamedChain::Holesky);
+ assert_eq!(chain.id(), 17000);
}
#[test]
@@ -59,9 +77,9 @@ mod tests {
#[test]
fn test_into_u256() {
- let chain = Chain::from_named(NamedChain::Goerli);
+ let chain = Chain::from_named(NamedChain::Holesky);
let n: U256 = U256::from(chain.id());
- let expected = U256::from(5);
+ let expected = U256::from(17000);
assert_eq!(n, expected);
}
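
Note the crate-level attributes and the `extern crate alloc` fallback above: the new crate compiles without `std`. The gating pattern, sketched on its own (the exact import set is whatever `spec.rs` actually needs; in `std` builds `Box` and `Vec` come from the prelude, so only `Arc` is imported):

```rust
// std/no_std split used by the new crate: disable std when the feature is
// off, and pull heap types from `alloc` in that configuration.
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(not(feature = "std"))]
use alloc::{boxed::Box, sync::Arc, vec::Vec};
#[cfg(feature = "std")]
use std::sync::Arc;
```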
diff --git a/crates/primitives/src/chain/spec.rs b/crates/chainspec/src/spec.rs
similarity index 66%
rename from crates/primitives/src/chain/spec.rs
rename to crates/chainspec/src/spec.rs
index abdd1775e287..38521cf93a2b 100644
--- a/crates/primitives/src/chain/spec.rs
+++ b/crates/chainspec/src/spec.rs
@@ -1,74 +1,54 @@
-use crate::{
+use crate::constants::MAINNET_DEPOSIT_CONTRACT;
+#[cfg(not(feature = "std"))]
+use alloc::{boxed::Box, sync::Arc, vec::Vec};
+use alloy_chains::{Chain, ChainKind, NamedChain};
+use alloy_genesis::Genesis;
+use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256};
+use alloy_trie::EMPTY_ROOT_HASH;
+use derive_more::From;
+use once_cell::sync::Lazy;
+use reth_ethereum_forks::{
+ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition,
+ ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Head, DEV_HARDFORKS,
+};
+use reth_network_peers::NodeRecord;
+use reth_primitives_traits::{
constants::{
- EIP1559_INITIAL_BASE_FEE, EMPTY_RECEIPTS, EMPTY_ROOT_HASH, EMPTY_TRANSACTIONS,
- EMPTY_WITHDRAWALS,
+ DEV_GENESIS_HASH, EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, HOLESKY_GENESIS_HASH,
+ MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH,
},
- holesky_nodes,
- net::{goerli_nodes, mainnet_nodes, sepolia_nodes},
- proofs::state_root_ref_unhashed,
- revm_primitives::{address, b256},
- Address, BlockNumber, Chain, ChainKind, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis,
- Hardfork, Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH,
- MAINNET_DEPOSIT_CONTRACT, U256,
-};
-use once_cell::sync::Lazy;
-use serde::{Deserialize, Serialize};
-use std::{
- collections::BTreeMap,
- fmt::{Display, Formatter},
- sync::Arc,
+ Header, SealedHeader,
};
+use reth_trie_common::root::state_root_ref_unhashed;
+#[cfg(feature = "std")]
+use std::sync::Arc;
+#[cfg(feature = "optimism")]
+use crate::constants::optimism::{
+ BASE_SEPOLIA_BASE_FEE_PARAMS, BASE_SEPOLIA_CANYON_BASE_FEE_PARAMS, OP_BASE_FEE_PARAMS,
+ OP_CANYON_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS,
+};
pub use alloy_eips::eip1559::BaseFeeParams;
-
#[cfg(feature = "optimism")]
-pub(crate) use crate::{
- constants::{
- OP_BASE_FEE_PARAMS, OP_CANYON_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS,
- OP_SEPOLIA_CANYON_BASE_FEE_PARAMS,
- },
- net::{base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes},
+use reth_ethereum_forks::OptimismHardfork;
+use reth_network_peers::{
+ base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes,
+ sepolia_nodes,
};
/// The Ethereum mainnet spec
pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
ChainSpec {
chain: Chain::mainnet(),
- genesis: serde_json::from_str(include_str!("../../res/genesis/mainnet.json"))
+ genesis: serde_json::from_str(include_str!("../res/genesis/mainnet.json"))
.expect("Can't deserialize Mainnet genesis json"),
- genesis_hash: Some(b256!(
- "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
- )),
+ genesis_hash: Some(MAINNET_GENESIS_HASH),
//
paris_block_and_final_difficulty: Some((
15537394,
U256::from(58_750_003_716_598_352_816_469u128),
)),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(1150000)),
- (Hardfork::Dao, ForkCondition::Block(1920000)),
- (Hardfork::Tangerine, ForkCondition::Block(2463000)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(2675000)),
- (Hardfork::Byzantium, ForkCondition::Block(4370000)),
- (Hardfork::Constantinople, ForkCondition::Block(7280000)),
- (Hardfork::Petersburg, ForkCondition::Block(7280000)),
- (Hardfork::Istanbul, ForkCondition::Block(9069000)),
- (Hardfork::MuirGlacier, ForkCondition::Block(9200000)),
- (Hardfork::Berlin, ForkCondition::Block(12244000)),
- (Hardfork::London, ForkCondition::Block(12965000)),
- (Hardfork::ArrowGlacier, ForkCondition::Block(13773000)),
- (Hardfork::GrayGlacier, ForkCondition::Block(15050000)),
- (
- Hardfork::Paris,
- ForkCondition::TTD {
- fork_block: None,
- total_difficulty: U256::from(58_750_000_000_000_000_000_000_u128),
- },
- ),
- (Hardfork::Shanghai, ForkCondition::Timestamp(1681338455)),
- (Hardfork::Cancun, ForkCondition::Timestamp(1710338135)),
- ]),
+ hardforks: EthereumHardfork::mainnet().into(),
// https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0
deposit_contract: Some(DepositContract::new(
address!("00000000219ab540356cbb839cbe05303d7705fa"),
@@ -81,82 +61,16 @@ pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
.into()
});
-/// The Goerli spec
-pub static GOERLI: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
- ChainSpec {
- chain: Chain::goerli(),
- genesis: serde_json::from_str(include_str!("../../res/genesis/goerli.json"))
- .expect("Can't deserialize Goerli genesis json"),
- genesis_hash: Some(b256!(
- "bf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a"
- )),
- //
- paris_block_and_final_difficulty: Some((7382818, U256::from(10_790_000))),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(0)),
- (Hardfork::Dao, ForkCondition::Block(0)),
- (Hardfork::Tangerine, ForkCondition::Block(0)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
- (Hardfork::Byzantium, ForkCondition::Block(0)),
- (Hardfork::Constantinople, ForkCondition::Block(0)),
- (Hardfork::Petersburg, ForkCondition::Block(0)),
- (Hardfork::Istanbul, ForkCondition::Block(1561651)),
- (Hardfork::Berlin, ForkCondition::Block(4460644)),
- (Hardfork::London, ForkCondition::Block(5062605)),
- (
- Hardfork::Paris,
- ForkCondition::TTD { fork_block: None, total_difficulty: U256::from(10_790_000) },
- ),
- (Hardfork::Shanghai, ForkCondition::Timestamp(1678832736)),
- (Hardfork::Cancun, ForkCondition::Timestamp(1705473120)),
- ]),
- // https://goerli.etherscan.io/tx/0xa3c07dc59bfdb1bfc2d50920fed2ef2c1c4e0a09fe2325dbc14e07702f965a78
- deposit_contract: Some(DepositContract::new(
- address!("ff50ed3d0ec03ac01d4c79aad74928bff48a7b2b"),
- 4367322,
- b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
- )),
- base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
- prune_delete_limit: 1700,
- }
- .into()
-});
-
/// The Sepolia spec
pub static SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
ChainSpec {
chain: Chain::sepolia(),
- genesis: serde_json::from_str(include_str!("../../res/genesis/sepolia.json"))
+ genesis: serde_json::from_str(include_str!("../res/genesis/sepolia.json"))
.expect("Can't deserialize Sepolia genesis json"),
- genesis_hash: Some(b256!(
- "25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"
- )),
+ genesis_hash: Some(SEPOLIA_GENESIS_HASH),
//
paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(0)),
- (Hardfork::Dao, ForkCondition::Block(0)),
- (Hardfork::Tangerine, ForkCondition::Block(0)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
- (Hardfork::Byzantium, ForkCondition::Block(0)),
- (Hardfork::Constantinople, ForkCondition::Block(0)),
- (Hardfork::Petersburg, ForkCondition::Block(0)),
- (Hardfork::Istanbul, ForkCondition::Block(0)),
- (Hardfork::MuirGlacier, ForkCondition::Block(0)),
- (Hardfork::Berlin, ForkCondition::Block(0)),
- (Hardfork::London, ForkCondition::Block(0)),
- (
- Hardfork::Paris,
- ForkCondition::TTD {
- fork_block: Some(1735371),
- total_difficulty: U256::from(17_000_000_000_000_000u64),
- },
- ),
- (Hardfork::Shanghai, ForkCondition::Timestamp(1677557088)),
- (Hardfork::Cancun, ForkCondition::Timestamp(1706655072)),
- ]),
+ hardforks: EthereumHardfork::sepolia().into(),
// https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14
deposit_contract: Some(DepositContract::new(
address!("7f02c3e3c98b133055b8b348b2ac625669ed295d"),
@@ -173,32 +87,11 @@ pub static SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
pub static HOLESKY: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
ChainSpec {
chain: Chain::holesky(),
- genesis: serde_json::from_str(include_str!("../../res/genesis/holesky.json"))
+ genesis: serde_json::from_str(include_str!("../res/genesis/holesky.json"))
.expect("Can't deserialize Holesky genesis json"),
- genesis_hash: Some(b256!(
- "b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"
- )),
+ genesis_hash: Some(HOLESKY_GENESIS_HASH),
paris_block_and_final_difficulty: Some((0, U256::from(1))),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(0)),
- (Hardfork::Dao, ForkCondition::Block(0)),
- (Hardfork::Tangerine, ForkCondition::Block(0)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
- (Hardfork::Byzantium, ForkCondition::Block(0)),
- (Hardfork::Constantinople, ForkCondition::Block(0)),
- (Hardfork::Petersburg, ForkCondition::Block(0)),
- (Hardfork::Istanbul, ForkCondition::Block(0)),
- (Hardfork::MuirGlacier, ForkCondition::Block(0)),
- (Hardfork::Berlin, ForkCondition::Block(0)),
- (Hardfork::London, ForkCondition::Block(0)),
- (
- Hardfork::Paris,
- ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO },
- ),
- (Hardfork::Shanghai, ForkCondition::Timestamp(1696000704)),
- (Hardfork::Cancun, ForkCondition::Timestamp(1707305664)),
- ]),
+ hardforks: EthereumHardfork::holesky().into(),
deposit_contract: Some(DepositContract::new(
address!("4242424242424242424242424242424242424242"),
0,
@@ -217,38 +110,11 @@ pub static HOLESKY: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
pub static DEV: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
ChainSpec {
chain: Chain::dev(),
- genesis: serde_json::from_str(include_str!("../../res/genesis/dev.json"))
+ genesis: serde_json::from_str(include_str!("../res/genesis/dev.json"))
.expect("Can't deserialize Dev testnet genesis json"),
- genesis_hash: Some(b256!(
- "2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c"
- )),
+ genesis_hash: Some(DEV_GENESIS_HASH),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(0)),
- (Hardfork::Dao, ForkCondition::Block(0)),
- (Hardfork::Tangerine, ForkCondition::Block(0)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
- (Hardfork::Byzantium, ForkCondition::Block(0)),
- (Hardfork::Constantinople, ForkCondition::Block(0)),
- (Hardfork::Petersburg, ForkCondition::Block(0)),
- (Hardfork::Istanbul, ForkCondition::Block(0)),
- (Hardfork::MuirGlacier, ForkCondition::Block(0)),
- (Hardfork::Berlin, ForkCondition::Block(0)),
- (Hardfork::London, ForkCondition::Block(0)),
- (
- Hardfork::Paris,
- ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) },
- ),
- (Hardfork::Shanghai, ForkCondition::Timestamp(0)),
- (Hardfork::Cancun, ForkCondition::Timestamp(0)),
- #[cfg(feature = "optimism")]
- (Hardfork::Regolith, ForkCondition::Timestamp(0)),
- #[cfg(feature = "optimism")]
- (Hardfork::Bedrock, ForkCondition::Block(0)),
- #[cfg(feature = "optimism")]
- (Hardfork::Ecotone, ForkCondition::Timestamp(0)),
- ]),
+ hardforks: DEV_HARDFORKS.clone(),
base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
deposit_contract: None, // TODO: do we even have?
..Default::default()
@@ -263,42 +129,17 @@ pub static OP_MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
chain: Chain::optimism_mainnet(),
// genesis contains empty alloc field because state at first bedrock block is imported
// manually from trusted source
- genesis: serde_json::from_str(include_str!("../../res/genesis/optimism.json"))
+ genesis: serde_json::from_str(include_str!("../res/genesis/optimism.json"))
.expect("Can't deserialize Optimism Mainnet genesis json"),
genesis_hash: Some(b256!(
"7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b"
)),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(0)),
- (Hardfork::Tangerine, ForkCondition::Block(0)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
- (Hardfork::Byzantium, ForkCondition::Block(0)),
- (Hardfork::Constantinople, ForkCondition::Block(0)),
- (Hardfork::Petersburg, ForkCondition::Block(0)),
- (Hardfork::Istanbul, ForkCondition::Block(0)),
- (Hardfork::MuirGlacier, ForkCondition::Block(0)),
- (Hardfork::Berlin, ForkCondition::Block(3950000)),
- (Hardfork::London, ForkCondition::Block(105235063)),
- (Hardfork::ArrowGlacier, ForkCondition::Block(105235063)),
- (Hardfork::GrayGlacier, ForkCondition::Block(105235063)),
- (
- Hardfork::Paris,
- ForkCondition::TTD { fork_block: Some(105235063), total_difficulty: U256::from(0) },
- ),
- (Hardfork::Bedrock, ForkCondition::Block(105235063)),
- (Hardfork::Regolith, ForkCondition::Timestamp(0)),
- (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)),
- (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)),
- (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)),
- (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)),
- (Hardfork::Fjord, ForkCondition::Timestamp(1720627201)),
- ]),
+ hardforks: OptimismHardfork::op_mainnet(),
base_fee_params: BaseFeeParamsKind::Variable(
vec![
- (Hardfork::London, OP_BASE_FEE_PARAMS),
- (Hardfork::Canyon, OP_CANYON_BASE_FEE_PARAMS),
+ (EthereumHardfork::London.boxed(), OP_BASE_FEE_PARAMS),
+ (OptimismHardfork::Canyon.boxed(), OP_CANYON_BASE_FEE_PARAMS),
]
.into(),
),
@@ -313,42 +154,17 @@ pub static OP_MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
pub static OP_SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
ChainSpec {
chain: Chain::from_named(NamedChain::OptimismSepolia),
- genesis: serde_json::from_str(include_str!("../../res/genesis/sepolia_op.json"))
+ genesis: serde_json::from_str(include_str!("../res/genesis/sepolia_op.json"))
.expect("Can't deserialize OP Sepolia genesis json"),
genesis_hash: Some(b256!(
"102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d"
)),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(0)),
- (Hardfork::Tangerine, ForkCondition::Block(0)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
- (Hardfork::Byzantium, ForkCondition::Block(0)),
- (Hardfork::Constantinople, ForkCondition::Block(0)),
- (Hardfork::Petersburg, ForkCondition::Block(0)),
- (Hardfork::Istanbul, ForkCondition::Block(0)),
- (Hardfork::MuirGlacier, ForkCondition::Block(0)),
- (Hardfork::Berlin, ForkCondition::Block(0)),
- (Hardfork::London, ForkCondition::Block(0)),
- (Hardfork::ArrowGlacier, ForkCondition::Block(0)),
- (Hardfork::GrayGlacier, ForkCondition::Block(0)),
- (
- Hardfork::Paris,
- ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) },
- ),
- (Hardfork::Bedrock, ForkCondition::Block(0)),
- (Hardfork::Regolith, ForkCondition::Timestamp(0)),
- (Hardfork::Shanghai, ForkCondition::Timestamp(1699981200)),
- (Hardfork::Canyon, ForkCondition::Timestamp(1699981200)),
- (Hardfork::Cancun, ForkCondition::Timestamp(1708534800)),
- (Hardfork::Ecotone, ForkCondition::Timestamp(1708534800)),
- (Hardfork::Fjord, ForkCondition::Timestamp(1716998400)),
- ]),
+ hardforks: OptimismHardfork::op_sepolia(),
base_fee_params: BaseFeeParamsKind::Variable(
vec![
- (Hardfork::London, OP_SEPOLIA_BASE_FEE_PARAMS),
- (Hardfork::Canyon, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS),
+ (EthereumHardfork::London.boxed(), OP_SEPOLIA_BASE_FEE_PARAMS),
+ (OptimismHardfork::Canyon.boxed(), OP_SEPOLIA_CANYON_BASE_FEE_PARAMS),
]
.into(),
),
@@ -363,42 +179,17 @@ pub static OP_SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
pub static BASE_SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
ChainSpec {
chain: Chain::base_sepolia(),
- genesis: serde_json::from_str(include_str!("../../res/genesis/sepolia_base.json"))
+ genesis: serde_json::from_str(include_str!("../res/genesis/sepolia_base.json"))
.expect("Can't deserialize Base Sepolia genesis json"),
genesis_hash: Some(b256!(
"0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4"
)),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(0)),
- (Hardfork::Tangerine, ForkCondition::Block(0)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
- (Hardfork::Byzantium, ForkCondition::Block(0)),
- (Hardfork::Constantinople, ForkCondition::Block(0)),
- (Hardfork::Petersburg, ForkCondition::Block(0)),
- (Hardfork::Istanbul, ForkCondition::Block(0)),
- (Hardfork::MuirGlacier, ForkCondition::Block(0)),
- (Hardfork::Berlin, ForkCondition::Block(0)),
- (Hardfork::London, ForkCondition::Block(0)),
- (Hardfork::ArrowGlacier, ForkCondition::Block(0)),
- (Hardfork::GrayGlacier, ForkCondition::Block(0)),
- (
- Hardfork::Paris,
- ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) },
- ),
- (Hardfork::Bedrock, ForkCondition::Block(0)),
- (Hardfork::Regolith, ForkCondition::Timestamp(0)),
- (Hardfork::Shanghai, ForkCondition::Timestamp(1699981200)),
- (Hardfork::Canyon, ForkCondition::Timestamp(1699981200)),
- (Hardfork::Cancun, ForkCondition::Timestamp(1708534800)),
- (Hardfork::Ecotone, ForkCondition::Timestamp(1708534800)),
- (Hardfork::Fjord, ForkCondition::Timestamp(1716998400)),
- ]),
+ hardforks: OptimismHardfork::base_sepolia(),
base_fee_params: BaseFeeParamsKind::Variable(
vec![
- (Hardfork::London, OP_SEPOLIA_BASE_FEE_PARAMS),
- (Hardfork::Canyon, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS),
+ (EthereumHardfork::London.boxed(), BASE_SEPOLIA_BASE_FEE_PARAMS),
+ (OptimismHardfork::Canyon.boxed(), BASE_SEPOLIA_CANYON_BASE_FEE_PARAMS),
]
.into(),
),
@@ -413,42 +204,17 @@ pub static BASE_SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
pub static BASE_MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
ChainSpec {
chain: Chain::base_mainnet(),
- genesis: serde_json::from_str(include_str!("../../res/genesis/base.json"))
+ genesis: serde_json::from_str(include_str!("../res/genesis/base.json"))
.expect("Can't deserialize Base genesis json"),
genesis_hash: Some(b256!(
"f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd"
)),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
- hardforks: BTreeMap::from([
- (Hardfork::Frontier, ForkCondition::Block(0)),
- (Hardfork::Homestead, ForkCondition::Block(0)),
- (Hardfork::Tangerine, ForkCondition::Block(0)),
- (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
- (Hardfork::Byzantium, ForkCondition::Block(0)),
- (Hardfork::Constantinople, ForkCondition::Block(0)),
- (Hardfork::Petersburg, ForkCondition::Block(0)),
- (Hardfork::Istanbul, ForkCondition::Block(0)),
- (Hardfork::MuirGlacier, ForkCondition::Block(0)),
- (Hardfork::Berlin, ForkCondition::Block(0)),
- (Hardfork::London, ForkCondition::Block(0)),
- (Hardfork::ArrowGlacier, ForkCondition::Block(0)),
- (Hardfork::GrayGlacier, ForkCondition::Block(0)),
- (
- Hardfork::Paris,
- ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) },
- ),
- (Hardfork::Bedrock, ForkCondition::Block(0)),
- (Hardfork::Regolith, ForkCondition::Timestamp(0)),
- (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)),
- (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)),
- (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)),
- (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)),
- (Hardfork::Fjord, ForkCondition::Timestamp(1720627201)),
- ]),
+ hardforks: OptimismHardfork::base_mainnet(),
base_fee_params: BaseFeeParamsKind::Variable(
vec![
- (Hardfork::London, OP_BASE_FEE_PARAMS),
- (Hardfork::Canyon, OP_CANYON_BASE_FEE_PARAMS),
+ (EthereumHardfork::London.boxed(), OP_BASE_FEE_PARAMS),
+ (OptimismHardfork::Canyon.boxed(), OP_CANYON_BASE_FEE_PARAMS),
]
.into(),
),
@@ -460,8 +226,7 @@ pub static BASE_MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
/// A wrapper around [`BaseFeeParams`] that allows for specifying constant or dynamic EIP-1559
/// parameters based on the active [Hardfork].
-#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
-#[serde(untagged)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BaseFeeParamsKind {
/// Constant [`BaseFeeParams`]; used for chains that don't have dynamic EIP-1559 parameters
Constant(BaseFeeParams),
@@ -470,6 +235,12 @@ pub enum BaseFeeParamsKind {
Variable(ForkBaseFeeParams),
}
+impl Default for BaseFeeParamsKind {
+ fn default() -> Self {
+ BaseFeeParams::ethereum().into()
+ }
+}
+
impl From<BaseFeeParams> for BaseFeeParamsKind {
fn from(params: BaseFeeParams) -> Self {
Self::Constant(params)
@@ -484,12 +255,14 @@ impl From<BaseFeeParams> for BaseFeeParamsKind {
/// A type alias to a vector of tuples of [Hardfork] and [`BaseFeeParams`], sorted by [Hardfork]
/// activation order. This is used to specify dynamic EIP-1559 parameters for chains like Optimism.
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
-pub struct ForkBaseFeeParams(Vec<(Hardfork, BaseFeeParams)>);
+#[derive(Clone, Debug, PartialEq, Eq, From)]
+pub struct ForkBaseFeeParams(Vec<(Box<dyn Hardfork>, BaseFeeParams)>);
+
+impl core::ops::Deref for ChainSpec {
+ type Target = ChainHardforks;
-impl From<Vec<(Hardfork, BaseFeeParams)>> for ForkBaseFeeParams {
- fn from(params: Vec<(Hardfork, BaseFeeParams)>) -> Self {
- Self(params)
+ fn deref(&self) -> &Self::Target {
+ &self.hardforks
}
}
@@ -500,7 +273,7 @@ impl From<Vec<(Hardfork, BaseFeeParams)>> for ForkBaseFeeParams {
/// - Meta-information about the chain (the chain ID)
/// - The genesis block of the chain ([`Genesis`])
/// - What hardforks are activated, and under which conditions
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ChainSpec {
/// The chain ID
pub chain: Chain,
@@ -509,22 +282,19 @@ pub struct ChainSpec {
///
/// This acts as a small cache for known chains. If the chain is known, then the genesis hash
/// is also known ahead of time, and this will be `Some`.
- #[serde(skip, default)]
pub genesis_hash: Option<B256>,
/// The genesis block
pub genesis: Genesis,
- /// The block at which [`Hardfork::Paris`] was activated and the final difficulty at this
- /// block.
- #[serde(skip, default)]
+ /// The block at which [`EthereumHardfork::Paris`] was activated and the final difficulty at
+ /// this block.
pub paris_block_and_final_difficulty: Option<(u64, U256)>,
/// The active hard forks and their activation conditions
- pub hardforks: BTreeMap<Hardfork, ForkCondition>,
+ pub hardforks: ChainHardforks,
/// The deposit contract deployed for `PoS`
- #[serde(skip, default)]
pub deposit_contract: Option<DepositContract>,
/// The parameters that configure how a block's base fee is computed
@@ -533,7 +303,6 @@ pub struct ChainSpec {
/// The delete limit for pruner, per block. In the actual pruner run it will be multiplied by
/// the amount of blocks between pruner runs to account for the difference in amount of new
/// data coming in.
- #[serde(default)]
pub prune_delete_limit: usize,
}
@@ -580,7 +349,7 @@ impl ChainSpec {
#[inline]
#[cfg(feature = "optimism")]
pub fn is_optimism(&self) -> bool {
- self.chain.is_optimism() || self.hardforks.contains_key(&Hardfork::Bedrock)
+ self.chain.is_optimism() || self.hardforks.get(OptimismHardfork::Bedrock).is_some()
}
/// Returns `true` if this chain contains Optimism configuration.
@@ -611,7 +380,7 @@ impl ChainSpec {
// If shanghai is activated, initialize the header with an empty withdrawals hash, and
// empty withdrawals list.
let withdrawals_root = self
- .fork(Hardfork::Shanghai)
+ .fork(EthereumHardfork::Shanghai)
.active_at_timestamp(self.genesis.timestamp)
.then_some(EMPTY_WITHDRAWALS);
@@ -636,12 +405,6 @@ impl ChainSpec {
};
Header {
- parent_hash: B256::ZERO,
- number: 0,
- transactions_root: EMPTY_TRANSACTIONS,
- ommers_hash: EMPTY_OMMER_ROOT_HASH,
- receipts_root: EMPTY_RECEIPTS,
- logs_bloom: Default::default(),
gas_limit: self.genesis.gas_limit as u64,
difficulty: self.genesis.difficulty,
nonce: self.genesis.nonce,
@@ -650,13 +413,13 @@ impl ChainSpec {
timestamp: self.genesis.timestamp,
mix_hash: self.genesis.mix_hash,
beneficiary: self.genesis.coinbase,
- gas_used: Default::default(),
base_fee_per_gas,
withdrawals_root,
parent_beacon_block_root,
blob_gas_used,
excess_blob_gas,
requests_root,
+ ..Default::default()
}
}
@@ -672,7 +435,7 @@ impl ChainSpec {
self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(EIP1559_INITIAL_BASE_FEE);
// If London is activated at genesis, we set the initial base fee as per EIP-1559.
- self.fork(Hardfork::London).active_at_block(0).then_some(genesis_base_fee)
+ self.hardforks.fork(EthereumHardfork::London).active_at_block(0).then_some(genesis_base_fee)
}
/// Get the [`BaseFeeParams`] for the chain at the given timestamp.
@@ -684,8 +447,8 @@ impl ChainSpec {
// first one that corresponds to a hardfork that is active at the
// given timestamp.
for (fork, params) in bf_params.iter().rev() {
- if self.is_fork_active_at_timestamp(*fork, timestamp) {
- return *params;
+ if self.hardforks.is_fork_active_at_timestamp(fork.clone(), timestamp) {
+ return *params
}
}
@@ -703,8 +466,8 @@ impl ChainSpec {
// first one that corresponds to a hardfork that is active at the
// given timestamp.
for (fork, params) in bf_params.iter().rev() {
- if self.is_fork_active_at_block(*fork, block_number) {
- return *params;
+ if self.hardforks.is_fork_active_at_block(fork.clone(), block_number) {
+ return *params
}
}
@@ -740,123 +503,55 @@ impl ChainSpec {
}
/// Get the fork filter for the given hardfork
- pub fn hardfork_fork_filter(&self, fork: Hardfork) -> Option<ForkFilter> {
- match self.fork(fork) {
+ pub fn hardfork_fork_filter<H: Hardfork + Clone>(&self, fork: H) -> Option<ForkFilter> {
+ match self.hardforks.fork(fork.clone()) {
ForkCondition::Never => None,
- _ => Some(self.fork_filter(self.satisfy(self.fork(fork)))),
+ _ => Some(self.fork_filter(self.satisfy(self.hardforks.fork(fork)))),
}
}
- /// Returns the forks in this specification and their activation conditions.
- pub const fn hardforks(&self) -> &BTreeMap<Hardfork, ForkCondition> {
- &self.hardforks
- }
-
/// Returns the hardfork display helper.
pub fn display_hardforks(&self) -> DisplayHardforks {
DisplayHardforks::new(
- self.hardforks(),
+ &self.hardforks,
self.paris_block_and_final_difficulty.map(|(block, _)| block),
)
}
/// Get the fork id for the given hardfork.
#[inline]
- pub fn hardfork_fork_id(&self, fork: Hardfork) -> Option<ForkId> {
- match self.fork(fork) {
+ pub fn hardfork_fork_id<H: Hardfork>(&self, fork: H) -> Option<ForkId> {
+ let condition = self.hardforks.fork(fork);
+ match condition {
ForkCondition::Never => None,
- _ => Some(self.fork_id(&self.satisfy(self.fork(fork)))),
+ _ => Some(self.fork_id(&self.satisfy(condition))),
}
}
- /// Convenience method to get the fork id for [`Hardfork::Shanghai`] from a given chainspec.
+ /// Convenience method to get the fork id for [`EthereumHardfork::Shanghai`] from a given
+ /// chainspec.
#[inline]
pub fn shanghai_fork_id(&self) -> Option<ForkId> {
- self.hardfork_fork_id(Hardfork::Shanghai)
+ self.hardfork_fork_id(EthereumHardfork::Shanghai)
}
- /// Convenience method to get the fork id for [`Hardfork::Cancun`] from a given chainspec.
+ /// Convenience method to get the fork id for [`EthereumHardfork::Cancun`] from a given
+ /// chainspec.
#[inline]
pub fn cancun_fork_id(&self) -> Option<ForkId> {
- self.hardfork_fork_id(Hardfork::Cancun)
+ self.hardfork_fork_id(EthereumHardfork::Cancun)
}
/// Convenience method to get the latest fork id from the chainspec. Panics if chainspec has no
/// hardforks.
#[inline]
pub fn latest_fork_id(&self) -> ForkId {
- self.hardfork_fork_id(*self.hardforks().last_key_value().unwrap().0).unwrap()
- }
-
- /// Get the fork condition for the given fork.
- pub fn fork(&self, fork: Hardfork) -> ForkCondition {
- self.hardforks.get(&fork).copied().unwrap_or(ForkCondition::Never)
- }
-
- /// Get an iterator of all hardforks with their respective activation conditions.
- pub fn forks_iter(&self) -> impl Iterator<Item = (Hardfork, ForkCondition)> + '_ {
- self.hardforks.iter().map(|(f, b)| (*f, *b))
- }
-
- /// Convenience method to check if a fork is active at a given timestamp.
- #[inline]
- pub fn is_fork_active_at_timestamp(&self, fork: Hardfork, timestamp: u64) -> bool {
- self.fork(fork).active_at_timestamp(timestamp)
- }
-
- /// Convenience method to check if a fork is active at a given block number
- #[inline]
- pub fn is_fork_active_at_block(&self, fork: Hardfork, block_number: u64) -> bool {
- self.fork(fork).active_at_block(block_number)
- }
-
- /// Convenience method to check if [`Hardfork::Shanghai`] is active at a given timestamp.
- #[inline]
- pub fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool {
- self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp)
- }
-
- /// Convenience method to check if [`Hardfork::Cancun`] is active at a given timestamp.
- #[inline]
- pub fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool {
- self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)
- }
-
- /// Convenience method to check if [`Hardfork::Prague`] is active at a given timestamp.
- #[inline]
- pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool {
- self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp)
- }
-
- /// Convenience method to check if [`Hardfork::Byzantium`] is active at a given block number.
- #[inline]
- pub fn is_byzantium_active_at_block(&self, block_number: u64) -> bool {
- self.fork(Hardfork::Byzantium).active_at_block(block_number)
- }
-
- /// Convenience method to check if [`Hardfork::SpuriousDragon`] is active at a given block
- /// number.
- #[inline]
- pub fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool {
- self.fork(Hardfork::SpuriousDragon).active_at_block(block_number)
- }
-
- /// Convenience method to check if [`Hardfork::Homestead`] is active at a given block number.
- #[inline]
- pub fn is_homestead_active_at_block(&self, block_number: u64) -> bool {
- self.fork(Hardfork::Homestead).active_at_block(block_number)
- }
-
- /// Convenience method to check if [`Hardfork::Bedrock`] is active at a given block number.
- #[cfg(feature = "optimism")]
- #[inline]
- pub fn is_bedrock_active_at_block(&self, block_number: u64) -> bool {
- self.fork(Hardfork::Bedrock).active_at_block(block_number)
+ self.hardfork_fork_id(self.hardforks.last().unwrap().0).unwrap()
}
/// Creates a [`ForkFilter`] for the block described by [Head].
pub fn fork_filter(&self, head: Head) -> ForkFilter {
- let forks = self.forks_iter().filter_map(|(_, condition)| {
+ let forks = self.hardforks.forks_iter().filter_map(|(_, condition)| {
// We filter out TTD-based forks w/o a pre-known block since those do not show up in the
// fork filter.
Some(match condition {
@@ -876,7 +571,7 @@ impl ChainSpec {
let mut current_applied = 0;
// handle all block forks before handling timestamp based forks. see: https://eips.ethereum.org/EIPS/eip-6122
- for (_, cond) in self.forks_iter() {
+ for (_, cond) in self.hardforks.forks_iter() {
// handle block based forks and the sepolia merge netsplit block edge case (TTD
// ForkCondition with Some(block))
if let ForkCondition::Block(block) |
@@ -898,7 +593,7 @@ impl ChainSpec {
// timestamp are ALWAYS applied after the merge.
//
// this filter ensures that no block-based forks are returned
- for timestamp in self.forks_iter().filter_map(|(_, cond)| {
+ for timestamp in self.hardforks.forks_iter().filter_map(|(_, cond)| {
cond.as_timestamp().filter(|time| time > &self.genesis.timestamp)
}) {
let cond = ForkCondition::Timestamp(timestamp);
@@ -942,7 +637,7 @@ impl ChainSpec {
///
/// Note: this returns None if the `ChainSpec` is not configured with a TTD/Timestamp fork.
pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option<u64> {
- let mut hardforks_iter = self.forks_iter().peekable();
+ let mut hardforks_iter = self.hardforks.forks_iter().peekable();
while let Some((_, curr_cond)) = hardforks_iter.next() {
if let Some((_, next_cond)) = hardforks_iter.peek() {
// peek and find the first occurrence of ForkCondition::TTD (merge) , or in
@@ -985,16 +680,11 @@ impl ChainSpec {
let chain = self.chain;
match chain.try_into().ok()? {
C::Mainnet => Some(mainnet_nodes()),
- C::Goerli => Some(goerli_nodes()),
C::Sepolia => Some(sepolia_nodes()),
C::Holesky => Some(holesky_nodes()),
- #[cfg(feature = "optimism")]
C::Base => Some(base_nodes()),
- #[cfg(feature = "optimism")]
C::Optimism => Some(op_nodes()),
- #[cfg(feature = "optimism")]
C::BaseGoerli | C::BaseSepolia => Some(base_testnet_nodes()),
- #[cfg(feature = "optimism")]
C::OptimismSepolia | C::OptimismGoerli | C::OptimismKovan => Some(op_testnet_nodes()),
_ => None,
}
@@ -1005,40 +695,43 @@ impl From<Genesis> for ChainSpec {
fn from(genesis: Genesis) -> Self {
#[cfg(feature = "optimism")]
let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis);
+ #[cfg(feature = "optimism")]
+ let genesis_info =
+ optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default();
// Block-based hardforks
let hardfork_opts = [
- (Hardfork::Homestead, genesis.config.homestead_block),
- (Hardfork::Dao, genesis.config.dao_fork_block),
- (Hardfork::Tangerine, genesis.config.eip150_block),
- (Hardfork::SpuriousDragon, genesis.config.eip155_block),
- (Hardfork::Byzantium, genesis.config.byzantium_block),
- (Hardfork::Constantinople, genesis.config.constantinople_block),
- (Hardfork::Petersburg, genesis.config.petersburg_block),
- (Hardfork::Istanbul, genesis.config.istanbul_block),
- (Hardfork::MuirGlacier, genesis.config.muir_glacier_block),
- (Hardfork::Berlin, genesis.config.berlin_block),
- (Hardfork::London, genesis.config.london_block),
- (Hardfork::ArrowGlacier, genesis.config.arrow_glacier_block),
- (Hardfork::GrayGlacier, genesis.config.gray_glacier_block),
+ (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block),
+ (EthereumHardfork::Dao.boxed(), genesis.config.dao_fork_block),
+ (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block),
+ (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block),
+ (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block),
+ (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block),
+ (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block),
+ (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block),
+ (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block),
+ (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block),
+ (EthereumHardfork::London.boxed(), genesis.config.london_block),
+ (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block),
+ (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block),
#[cfg(feature = "optimism")]
- (Hardfork::Bedrock, optimism_genesis_info.bedrock_block),
+ (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block),
];
let mut hardforks = hardfork_opts
- .iter()
- .filter_map(|(hardfork, opt)| opt.map(|block| (*hardfork, ForkCondition::Block(block))))
- .collect::<BTreeMap<_, _>>();
+ .into_iter()
+ .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block))))
+ .collect::<Vec<_>>();
// Paris
let paris_block_and_final_difficulty =
if let Some(ttd) = genesis.config.terminal_total_difficulty {
- hardforks.insert(
- Hardfork::Paris,
+ hardforks.push((
+ EthereumHardfork::Paris.boxed(),
ForkCondition::TTD {
total_difficulty: ttd,
fork_block: genesis.config.merge_netsplit_block,
},
- );
+ ));
genesis.config.merge_netsplit_block.map(|block| (block, ttd))
} else {
@@ -1047,28 +740,45 @@ impl From<Genesis> for ChainSpec {
// Time-based hardforks
let time_hardfork_opts = [
- (Hardfork::Shanghai, genesis.config.shanghai_time),
- (Hardfork::Cancun, genesis.config.cancun_time),
- (Hardfork::Prague, genesis.config.prague_time),
+ (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time),
+ (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time),
+ (EthereumHardfork::Prague.boxed(), genesis.config.prague_time),
#[cfg(feature = "optimism")]
- (Hardfork::Regolith, optimism_genesis_info.regolith_time),
+ (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time),
#[cfg(feature = "optimism")]
- (Hardfork::Canyon, optimism_genesis_info.canyon_time),
+ (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time),
#[cfg(feature = "optimism")]
- (Hardfork::Ecotone, optimism_genesis_info.ecotone_time),
+ (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time),
#[cfg(feature = "optimism")]
- (Hardfork::Fjord, optimism_genesis_info.fjord_time),
+ (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time),
];
let time_hardforks = time_hardfork_opts
- .iter()
+ .into_iter()
.filter_map(|(hardfork, opt)| {
- opt.map(|time| (*hardfork, ForkCondition::Timestamp(time)))
+ opt.map(|time| (hardfork, ForkCondition::Timestamp(time)))
})
- .collect::<BTreeMap<_, _>>();
+ .collect::<Vec<_>>();
hardforks.extend(time_hardforks);
+ // Uses ethereum or optimism main chains to find proper order
+ #[cfg(not(feature = "optimism"))]
+ let mainnet_hardforks: ChainHardforks = EthereumHardfork::mainnet().into();
+ #[cfg(not(feature = "optimism"))]
+ let mainnet_order = mainnet_hardforks.forks_iter();
+ #[cfg(feature = "optimism")]
+ let mainnet_hardforks = OptimismHardfork::op_mainnet();
+ #[cfg(feature = "optimism")]
+ let mainnet_order = mainnet_hardforks.forks_iter();
+
+ let mut ordered_hardforks = Vec::with_capacity(hardforks.len());
+ for (hardfork, _) in mainnet_order {
+ if let Some(pos) = hardforks.iter().position(|(e, _)| **e == *hardfork) {
+ ordered_hardforks.push(hardforks[pos].clone());
+ }
+ }
+
// NOTE: in full node, we prune all receipts except the deposit contract's. We do not
// have the deployment block in the genesis file, so we use block zero. We use the same
// deposit topic as the mainnet contract if we have the deposit contract address in the
@@ -1081,7 +791,7 @@ impl From<Genesis> for ChainSpec {
chain: genesis.config.chain_id.into(),
genesis,
genesis_hash: None,
- hardforks,
+ hardforks: ChainHardforks::new(hardforks),
paris_block_and_final_difficulty,
deposit_contract,
#[cfg(feature = "optimism")]
@@ -1091,49 +801,12 @@ impl From<Genesis> for ChainSpec {
}
}
-/// A helper type for compatibility with geth's config
-#[derive(Debug, Clone, Deserialize, Serialize)]
-#[serde(untagged)]
-pub enum AllGenesisFormats {
- /// The reth genesis format
- Reth(ChainSpec),
- /// The geth genesis format
- Geth(Genesis),
-}
-
-impl From<Genesis> for AllGenesisFormats {
- fn from(genesis: Genesis) -> Self {
- Self::Geth(genesis)
- }
-}
-
-impl From<ChainSpec> for AllGenesisFormats {
- fn from(genesis: ChainSpec) -> Self {
- Self::Reth(genesis)
- }
-}
-
-impl From<Arc<ChainSpec>> for AllGenesisFormats {
- fn from(genesis: Arc) -> Self {
- Arc::try_unwrap(genesis).unwrap_or_else(|arc| (*arc).clone()).into()
- }
-}
-
-impl From<AllGenesisFormats> for ChainSpec {
- fn from(genesis: AllGenesisFormats) -> Self {
- match genesis {
- AllGenesisFormats::Geth(genesis) => genesis.into(),
- AllGenesisFormats::Reth(genesis) => genesis,
- }
- }
-}
-
/// A helper to build custom chain specs
#[derive(Debug, Default, Clone)]
pub struct ChainSpecBuilder {
chain: Option<Chain>,
genesis: Option<Genesis>,
- hardforks: BTreeMap<Hardfork, ForkCondition>,
+ hardforks: ChainHardforks,
}
impl ChainSpecBuilder {
@@ -1145,7 +818,9 @@ impl ChainSpecBuilder {
hardforks: MAINNET.hardforks.clone(),
}
}
+}
+impl ChainSpecBuilder {
/// Set the chain ID
pub const fn chain(mut self, chain: Chain) -> Self {
self.chain = Some(chain);
@@ -1159,14 +834,14 @@ impl ChainSpecBuilder {
}
/// Add the given fork with the given activation condition to the spec.
- pub fn with_fork(mut self, fork: Hardfork, condition: ForkCondition) -> Self {
+ pub fn with_fork(mut self, fork: EthereumHardfork, condition: ForkCondition) -> Self {
self.hardforks.insert(fork, condition);
self
}
/// Remove the given fork from the spec.
- pub fn without_fork(mut self, fork: Hardfork) -> Self {
- self.hardforks.remove(&fork);
+ pub fn without_fork(mut self, fork: EthereumHardfork) -> Self {
+ self.hardforks.remove(fork);
self
}
@@ -1175,77 +850,77 @@ impl ChainSpecBuilder {
/// Does not set the merge netsplit block.
pub fn paris_at_ttd(self, ttd: U256) -> Self {
self.with_fork(
- Hardfork::Paris,
+ EthereumHardfork::Paris,
ForkCondition::TTD { total_difficulty: ttd, fork_block: None },
)
}
/// Enable Frontier at genesis.
pub fn frontier_activated(mut self) -> Self {
- self.hardforks.insert(Hardfork::Frontier, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::Frontier, ForkCondition::Block(0));
self
}
/// Enable Homestead at genesis.
pub fn homestead_activated(mut self) -> Self {
self = self.frontier_activated();
- self.hardforks.insert(Hardfork::Homestead, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::Homestead, ForkCondition::Block(0));
self
}
/// Enable Tangerine at genesis.
pub fn tangerine_whistle_activated(mut self) -> Self {
self = self.homestead_activated();
- self.hardforks.insert(Hardfork::Tangerine, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::Tangerine, ForkCondition::Block(0));
self
}
/// Enable Spurious Dragon at genesis.
pub fn spurious_dragon_activated(mut self) -> Self {
self = self.tangerine_whistle_activated();
- self.hardforks.insert(Hardfork::SpuriousDragon, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::SpuriousDragon, ForkCondition::Block(0));
self
}
/// Enable Byzantium at genesis.
pub fn byzantium_activated(mut self) -> Self {
self = self.spurious_dragon_activated();
- self.hardforks.insert(Hardfork::Byzantium, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::Byzantium, ForkCondition::Block(0));
self
}
/// Enable Constantinople at genesis.
pub fn constantinople_activated(mut self) -> Self {
self = self.byzantium_activated();
- self.hardforks.insert(Hardfork::Constantinople, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::Constantinople, ForkCondition::Block(0));
self
}
/// Enable Petersburg at genesis.
pub fn petersburg_activated(mut self) -> Self {
self = self.constantinople_activated();
- self.hardforks.insert(Hardfork::Petersburg, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::Petersburg, ForkCondition::Block(0));
self
}
/// Enable Istanbul at genesis.
pub fn istanbul_activated(mut self) -> Self {
self = self.petersburg_activated();
- self.hardforks.insert(Hardfork::Istanbul, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::Istanbul, ForkCondition::Block(0));
self
}
/// Enable Berlin at genesis.
pub fn berlin_activated(mut self) -> Self {
self = self.istanbul_activated();
- self.hardforks.insert(Hardfork::Berlin, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::Berlin, ForkCondition::Block(0));
self
}
/// Enable London at genesis.
pub fn london_activated(mut self) -> Self {
self = self.berlin_activated();
- self.hardforks.insert(Hardfork::London, ForkCondition::Block(0));
+ self.hardforks.insert(EthereumHardfork::London, ForkCondition::Block(0));
self
}
@@ -1253,7 +928,7 @@ impl ChainSpecBuilder {
pub fn paris_activated(mut self) -> Self {
self = self.london_activated();
self.hardforks.insert(
- Hardfork::Paris,
+ EthereumHardfork::Paris,
ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO },
);
self
@@ -1262,14 +937,14 @@ impl ChainSpecBuilder {
/// Enable Shanghai at genesis.
pub fn shanghai_activated(mut self) -> Self {
self = self.paris_activated();
- self.hardforks.insert(Hardfork::Shanghai, ForkCondition::Timestamp(0));
+ self.hardforks.insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0));
self
}
/// Enable Cancun at genesis.
pub fn cancun_activated(mut self) -> Self {
self = self.shanghai_activated();
- self.hardforks.insert(Hardfork::Cancun, ForkCondition::Timestamp(0));
+ self.hardforks.insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0));
self
}
@@ -1277,7 +952,7 @@ impl ChainSpecBuilder {
#[cfg(feature = "optimism")]
pub fn bedrock_activated(mut self) -> Self {
self = self.paris_activated();
- self.hardforks.insert(Hardfork::Bedrock, ForkCondition::Block(0));
+ self.hardforks.insert(OptimismHardfork::Bedrock, ForkCondition::Block(0));
self
}
@@ -1285,7 +960,7 @@ impl ChainSpecBuilder {
#[cfg(feature = "optimism")]
pub fn regolith_activated(mut self) -> Self {
self = self.bedrock_activated();
- self.hardforks.insert(Hardfork::Regolith, ForkCondition::Timestamp(0));
+ self.hardforks.insert(OptimismHardfork::Regolith, ForkCondition::Timestamp(0));
self
}
@@ -1294,8 +969,8 @@ impl ChainSpecBuilder {
pub fn canyon_activated(mut self) -> Self {
self = self.regolith_activated();
// Canyon also activates changes from L1's Shanghai hardfork
- self.hardforks.insert(Hardfork::Shanghai, ForkCondition::Timestamp(0));
- self.hardforks.insert(Hardfork::Canyon, ForkCondition::Timestamp(0));
+ self.hardforks.insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0));
+ self.hardforks.insert(OptimismHardfork::Canyon, ForkCondition::Timestamp(0));
self
}
@@ -1303,8 +978,8 @@ impl ChainSpecBuilder {
#[cfg(feature = "optimism")]
pub fn ecotone_activated(mut self) -> Self {
self = self.canyon_activated();
- self.hardforks.insert(Hardfork::Cancun, ForkCondition::Timestamp(0));
- self.hardforks.insert(Hardfork::Ecotone, ForkCondition::Timestamp(0));
+ self.hardforks.insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0));
+ self.hardforks.insert(OptimismHardfork::Ecotone, ForkCondition::Timestamp(0));
self
}
@@ -1312,7 +987,7 @@ impl ChainSpecBuilder {
#[cfg(feature = "optimism")]
pub fn fjord_activated(mut self) -> Self {
self = self.ecotone_activated();
- self.hardforks.insert(Hardfork::Fjord, ForkCondition::Timestamp(0));
+ self.hardforks.insert(OptimismHardfork::Fjord, ForkCondition::Timestamp(0));
self
}
@@ -1324,9 +999,9 @@ impl ChainSpecBuilder {
/// [`Self::genesis`])
pub fn build(self) -> ChainSpec {
let paris_block_and_final_difficulty = {
- self.hardforks.get(&Hardfork::Paris).and_then(|cond| {
+ self.hardforks.get(EthereumHardfork::Paris).and_then(|cond| {
if let ForkCondition::TTD { fork_block, total_difficulty } = cond {
- fork_block.map(|fork_block| (fork_block, *total_difficulty))
+ fork_block.map(|fork_block| (fork_block, total_difficulty))
} else {
None
}
@@ -1354,275 +1029,6 @@ impl From<&Arc<ChainSpec>> for ChainSpecBuilder {
}
}
-/// The condition at which a fork is activated.
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
-pub enum ForkCondition {
- /// The fork is activated after a certain block.
- Block(BlockNumber),
- /// The fork is activated after a total difficulty has been reached.
- TTD {
- /// The block number at which TTD is reached, if it is known.
- ///
- /// This should **NOT** be set unless you want this block advertised as [EIP-2124][eip2124]
- /// `FORK_NEXT`. This is currently only the case for Sepolia and Holesky.
- ///
- /// [eip2124]: https://eips.ethereum.org/EIPS/eip-2124
- fork_block: Option<BlockNumber>,
- /// The total difficulty after which the fork is activated.
- total_difficulty: U256,
- },
- /// The fork is activated after a specific timestamp.
- Timestamp(u64),
- /// The fork is never activated
- #[default]
- Never,
-}
-
-impl ForkCondition {
- /// Returns true if the fork condition is timestamp based.
- pub const fn is_timestamp(&self) -> bool {
- matches!(self, Self::Timestamp(_))
- }
-
- /// Checks whether the fork condition is satisfied at the given block.
- ///
- /// For TTD conditions, this will only return true if the activation block is already known.
- ///
- /// For timestamp conditions, this will always return false.
- pub const fn active_at_block(&self, current_block: BlockNumber) -> bool {
- matches!(self, Self::Block(block)
- | Self::TTD { fork_block: Some(block), .. } if current_block >= *block)
- }
-
- /// Checks if the given block is the first block that satisfies the fork condition.
- ///
- /// This will return false for any condition that is not block based.
- pub const fn transitions_at_block(&self, current_block: BlockNumber) -> bool {
- matches!(self, Self::Block(block) if current_block == *block)
- }
-
- /// Checks whether the fork condition is satisfied at the given total difficulty and difficulty
- /// of a current block.
- ///
- /// The fork is considered active if the _previous_ total difficulty is above the threshold.
- /// To achieve that, we subtract the passed `difficulty` from the current block's total
- /// difficulty, and check if it's above the Fork Condition's total difficulty (here:
- /// `58_750_000_000_000_000_000_000`)
- ///
- /// This will return false for any condition that is not TTD-based.
- pub fn active_at_ttd(&self, ttd: U256, difficulty: U256) -> bool {
- matches!(self, Self::TTD { total_difficulty, .. }
- if ttd.saturating_sub(difficulty) >= *total_difficulty)
- }
-
- /// Checks whether the fork condition is satisfied at the given timestamp.
- ///
- /// This will return false for any condition that is not timestamp-based.
- pub const fn active_at_timestamp(&self, timestamp: u64) -> bool {
- matches!(self, Self::Timestamp(time) if timestamp >= *time)
- }
-
- /// Checks whether the fork condition is satisfied at the given head block.
- ///
- /// This will return true if:
- ///
- /// - The condition is satisfied by the block number;
- /// - The condition is satisfied by the timestamp;
- /// - or the condition is satisfied by the total difficulty
- pub fn active_at_head(&self, head: &Head) -> bool {
- self.active_at_block(head.number) ||
- self.active_at_timestamp(head.timestamp) ||
- self.active_at_ttd(head.total_difficulty, head.difficulty)
- }
-
- /// Get the total terminal difficulty for this fork condition.
- ///
- /// Returns `None` for fork conditions that are not TTD based.
- pub const fn ttd(&self) -> Option<U256> {
- match self {
- Self::TTD { total_difficulty, .. } => Some(*total_difficulty),
- _ => None,
- }
- }
-
- /// Returns the timestamp of the fork condition, if it is timestamp based.
- pub const fn as_timestamp(&self) -> Option<u64> {
- match self {
- Self::Timestamp(timestamp) => Some(*timestamp),
- _ => None,
- }
- }
-}
-
-/// A container to pretty-print a hardfork.
-///
-/// The fork is formatted depending on its fork condition:
-///
-/// - Block and timestamp based forks are formatted in the same manner (`{name} <({eip})>
-/// @{condition}`)
-/// - TTD based forks are formatted separately as `{name} <({eip})> @{ttd} (network is known
-/// to be merged)`
-///
-/// An optional EIP can be attached to the fork to display as well. This should generally be in the
-/// form of just `EIP-x`, e.g. `EIP-1559`.
-#[derive(Debug)]
-struct DisplayFork {
- /// The name of the hardfork (e.g. Frontier)
- name: String,
- /// The fork condition
- activated_at: ForkCondition,
- /// An optional EIP (e.g. `EIP-1559`).
- eip: Option<String>,
-}
-
-impl Display for DisplayFork {
- fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
- let name_with_eip = if let Some(eip) = &self.eip {
- format!("{} ({})", self.name, eip)
- } else {
- self.name.clone()
- };
-
- match self.activated_at {
- ForkCondition::Block(at) | ForkCondition::Timestamp(at) => {
- write!(f, "{name_with_eip:32} @{at}")?;
- }
- ForkCondition::TTD { fork_block, total_difficulty } => {
- write!(
- f,
- "{:32} @{} ({})",
- name_with_eip,
- total_difficulty,
- if fork_block.is_some() {
- "network is known to be merged"
- } else {
- "network is not known to be merged"
- }
- )?;
- }
- ForkCondition::Never => unreachable!(),
- }
-
- Ok(())
- }
-}
-
-/// A container for pretty-printing a list of hardforks.
-///
-/// # Examples
-///
-/// ```
-/// # use reth_primitives::MAINNET;
-/// println!("{}", MAINNET.display_hardforks());
-/// ```
-///
-/// An example of the output:
-///
-/// ```text
-/// Pre-merge hard forks (block based):
-// - Frontier @0
-// - Homestead @1150000
-// - Dao @1920000
-// - Tangerine @2463000
-// - SpuriousDragon @2675000
-// - Byzantium @4370000
-// - Constantinople @7280000
-// - Petersburg @7280000
-// - Istanbul @9069000
-// - MuirGlacier @9200000
-// - Berlin @12244000
-// - London @12965000
-// - ArrowGlacier @13773000
-// - GrayGlacier @15050000
-// Merge hard forks:
-// - Paris @58750000000000000000000 (network is known to be merged)
-// Post-merge hard forks (timestamp based):
-// - Shanghai @1681338455
-/// ```
-#[derive(Debug)]
-pub struct DisplayHardforks {
- /// A list of pre-merge (block based) hardforks
- pre_merge: Vec<DisplayFork>,
- /// A list of merge (TTD based) hardforks
- with_merge: Vec<DisplayFork>,
- /// A list of post-merge (timestamp based) hardforks
- post_merge: Vec<DisplayFork>,
-}
-
-impl Display for DisplayHardforks {
- fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
- fn format(
- header: &str,
- forks: &[DisplayFork],
- next_is_empty: bool,
- f: &mut Formatter<'_>,
- ) -> std::fmt::Result {
- writeln!(f, "{header}:")?;
- let mut iter = forks.iter().peekable();
- while let Some(fork) = iter.next() {
- write!(f, "- {fork}")?;
- if !next_is_empty || iter.peek().is_some() {
- writeln!(f)?;
- }
- }
- Ok(())
- }
-
- format(
- "Pre-merge hard forks (block based)",
- &self.pre_merge,
- self.with_merge.is_empty(),
- f,
- )?;
-
- if !self.with_merge.is_empty() {
- format("Merge hard forks", &self.with_merge, self.post_merge.is_empty(), f)?;
- }
-
- if !self.post_merge.is_empty() {
- format("Post-merge hard forks (timestamp based)", &self.post_merge, true, f)?;
- }
-
- Ok(())
- }
-}
-
-impl DisplayHardforks {
- /// Creates a new [`DisplayHardforks`] from an iterator of hardforks.
- pub fn new(
- hardforks: &BTreeMap<Hardfork, ForkCondition>,
- known_paris_block: Option<u64>,
- ) -> Self {
- let mut pre_merge = Vec::new();
- let mut with_merge = Vec::new();
- let mut post_merge = Vec::new();
-
- for (fork, condition) in hardforks {
- let mut display_fork =
- DisplayFork { name: fork.to_string(), activated_at: *condition, eip: None };
-
- match condition {
- ForkCondition::Block(_) => {
- pre_merge.push(display_fork);
- }
- ForkCondition::TTD { total_difficulty, .. } => {
- display_fork.activated_at = ForkCondition::TTD {
- fork_block: known_paris_block,
- total_difficulty: *total_difficulty,
- };
- with_merge.push(display_fork);
- }
- ForkCondition::Timestamp(_) => {
- post_merge.push(display_fork);
- }
- ForkCondition::Never => continue,
- }
- }
-
- Self { pre_merge, with_merge, post_merge }
- }
-}
-
/// `PoS` deposit contract details.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DepositContract {
@@ -1641,94 +1047,73 @@ impl DepositContract {
}
}
+/// Genesis info for Optimism.
#[cfg(feature = "optimism")]
+#[derive(Default, Debug, serde::Deserialize)]
+#[serde(rename_all = "camelCase")]
struct OptimismGenesisInfo {
- bedrock_block: Option<u64>,
- regolith_time: Option<u64>,
- canyon_time: Option<u64>,
- ecotone_time: Option<u64>,
- fjord_time: Option<u64>,
+ optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo,
+ #[serde(skip)]
base_fee_params: BaseFeeParamsKind,
}
#[cfg(feature = "optimism")]
impl OptimismGenesisInfo {
fn extract_from(genesis: &Genesis) -> Self {
- let optimism_config =
- genesis.config.extra_fields.get("optimism").and_then(|value| value.as_object());
-
- let eip1559_elasticity = optimism_config
- .and_then(|config| config.get("eip1559Elasticity"))
- .and_then(|value| value.as_u64());
-
- let eip1559_denominator = optimism_config
- .and_then(|config| config.get("eip1559Denominator"))
- .and_then(|value| value.as_u64());
-
- let eip1559_denominator_canyon = optimism_config
- .and_then(|config| config.get("eip1559DenominatorCanyon"))
- .and_then(|value| value.as_u64());
-
- let base_fee_params = if let (Some(elasticity), Some(denominator)) =
- (eip1559_elasticity, eip1559_denominator)
- {
- if let Some(canyon_denominator) = eip1559_denominator_canyon {
- BaseFeeParamsKind::Variable(
- vec![
- (
- Hardfork::London,
- BaseFeeParams::new(denominator as u128, elasticity as u128),
- ),
- (
- Hardfork::Canyon,
- BaseFeeParams::new(canyon_denominator as u128, elasticity as u128),
- ),
- ]
- .into(),
- )
- } else {
- BaseFeeParams::new(denominator as u128, elasticity as u128).into()
- }
- } else {
- BaseFeeParams::ethereum().into()
+ let mut info = Self {
+ optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo::extract_from(
+ &genesis.config.extra_fields,
+ )
+ .unwrap_or_default(),
+ ..Default::default()
};
+ if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info {
+ if let (Some(elasticity), Some(denominator)) = (
+ optimism_base_fee_info.eip1559_elasticity,
+ optimism_base_fee_info.eip1559_denominator,
+ ) {
+ let base_fee_params = if let Some(canyon_denominator) =
+ optimism_base_fee_info.eip1559_denominator_canyon
+ {
+ BaseFeeParamsKind::Variable(
+ vec![
+ (
+ EthereumHardfork::London.boxed(),
+ BaseFeeParams::new(denominator as u128, elasticity as u128),
+ ),
+ (
+ OptimismHardfork::Canyon.boxed(),
+ BaseFeeParams::new(canyon_denominator as u128, elasticity as u128),
+ ),
+ ]
+ .into(),
+ )
+ } else {
+ BaseFeeParams::new(denominator as u128, elasticity as u128).into()
+ };
- Self {
- bedrock_block: genesis
- .config
- .extra_fields
- .get("bedrockBlock")
- .and_then(|value| value.as_u64()),
- regolith_time: genesis
- .config
- .extra_fields
- .get("regolithTime")
- .and_then(|value| value.as_u64()),
- canyon_time: genesis
- .config
- .extra_fields
- .get("canyonTime")
- .and_then(|value| value.as_u64()),
- ecotone_time: genesis
- .config
- .extra_fields
- .get("ecotoneTime")
- .and_then(|value| value.as_u64()),
- fjord_time: genesis
- .config
- .extra_fields
- .get("fjordTime")
- .and_then(|value| value.as_u64()),
- base_fee_params,
+ info.base_fee_params = base_fee_params;
+ }
}
+
+ info
}
}
#[cfg(test)]
mod tests {
use super::*;
- use crate::{b256, hex, proofs::IntoTrieAccount, ChainConfig, GenesisAccount};
+ use alloy_chains::Chain;
+ use alloy_genesis::{ChainConfig, GenesisAccount};
+ use alloy_primitives::{b256, hex};
+ use core::ops::Deref;
+ use reth_ethereum_forks::{ForkCondition, ForkHash, ForkId, Head};
+ use reth_trie_common::TrieAccount;
use std::{collections::HashMap, str::FromStr};
+
+ #[cfg(feature = "optimism")]
+ use reth_ethereum_forks::OptimismHardforks;
+
fn test_fork_ids(spec: &ChainSpec, cases: &[(Head, ForkId)]) {
for (block, expected_id) in cases {
let computed_id = spec.fork_id(block);
@@ -1740,14 +1125,14 @@ mod tests {
}
}
- fn test_hardfork_fork_ids(spec: &ChainSpec, cases: &[(Hardfork, ForkId)]) {
+ fn test_hardfork_fork_ids(spec: &ChainSpec, cases: &[(EthereumHardfork, ForkId)]) {
for (hardfork, expected_id) in cases {
if let Some(computed_id) = spec.hardfork_fork_id(*hardfork) {
assert_eq!(
expected_id, &computed_id,
"Expected fork ID {expected_id:?}, computed fork ID {computed_id:?} for hardfork {hardfork}"
);
- if matches!(hardfork, Hardfork::Shanghai) {
+ if matches!(hardfork, EthereumHardfork::Shanghai) {
if let Some(shangai_id) = spec.shanghai_fork_id() {
assert_eq!(
expected_id, &shangai_id,
@@ -1793,8 +1178,8 @@ Post-merge hard forks (timestamp based):
let spec = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(Genesis::default())
- .with_fork(Hardfork::Frontier, ForkCondition::Block(0))
- .with_fork(Hardfork::Shanghai, ForkCondition::Never)
+ .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Shanghai, ForkCondition::Never)
.build();
assert_eq!(
spec.display_hardforks().to_string(),
@@ -1809,21 +1194,21 @@ Post-merge hard forks (timestamp based):
let spec = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(Genesis::default())
- .with_fork(Hardfork::Frontier, ForkCondition::Block(0))
- .with_fork(Hardfork::Homestead, ForkCondition::Block(0))
- .with_fork(Hardfork::Tangerine, ForkCondition::Block(0))
- .with_fork(Hardfork::SpuriousDragon, ForkCondition::Block(0))
- .with_fork(Hardfork::Byzantium, ForkCondition::Block(0))
- .with_fork(Hardfork::Constantinople, ForkCondition::Block(0))
- .with_fork(Hardfork::Istanbul, ForkCondition::Block(0))
- .with_fork(Hardfork::MuirGlacier, ForkCondition::Block(0))
- .with_fork(Hardfork::Berlin, ForkCondition::Block(0))
- .with_fork(Hardfork::London, ForkCondition::Block(0))
- .with_fork(Hardfork::ArrowGlacier, ForkCondition::Block(0))
- .with_fork(Hardfork::GrayGlacier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Tangerine, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::SpuriousDragon, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Byzantium, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Constantinople, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Istanbul, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::MuirGlacier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Berlin, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::London, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::ArrowGlacier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::GrayGlacier, ForkCondition::Block(0))
.build();
- assert_eq!(spec.hardforks().len(), 12, "12 forks should be active.");
+ assert_eq!(spec.deref().len(), 12, "12 forks should be active.");
assert_eq!(
spec.fork_id(&Head { number: 1, ..Default::default() }),
ForkId { hash: ForkHash::from(spec.genesis_hash()), next: 0 },
@@ -1837,16 +1222,16 @@ Post-merge hard forks (timestamp based):
let unique_spec = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(empty_genesis.clone())
- .with_fork(Hardfork::Frontier, ForkCondition::Block(0))
- .with_fork(Hardfork::Homestead, ForkCondition::Block(1))
+ .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(1))
.build();
let duplicate_spec = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(empty_genesis)
- .with_fork(Hardfork::Frontier, ForkCondition::Block(0))
- .with_fork(Hardfork::Homestead, ForkCondition::Block(1))
- .with_fork(Hardfork::Tangerine, ForkCondition::Block(1))
+ .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(1))
+ .with_fork(EthereumHardfork::Tangerine, ForkCondition::Block(1))
.build();
assert_eq!(
@@ -1863,9 +1248,9 @@ Post-merge hard forks (timestamp based):
let happy_path_case = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(empty_genesis.clone())
- .with_fork(Hardfork::Frontier, ForkCondition::Block(0))
- .with_fork(Hardfork::Homestead, ForkCondition::Block(73))
- .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123))
+ .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73))
+ .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123))
.build();
let happy_path_head = happy_path_case.satisfy(ForkCondition::Timestamp(11313123));
let happy_path_expected = Head { number: 73, timestamp: 11313123, ..Default::default() };
@@ -1877,10 +1262,10 @@ Post-merge hard forks (timestamp based):
let multiple_timestamp_fork_case = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(empty_genesis.clone())
- .with_fork(Hardfork::Frontier, ForkCondition::Block(0))
- .with_fork(Hardfork::Homestead, ForkCondition::Block(73))
- .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123))
- .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(11313398))
+ .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73))
+ .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123))
+ .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(11313398))
.build();
let multi_timestamp_head =
multiple_timestamp_fork_case.satisfy(ForkCondition::Timestamp(11313398));
@@ -1894,7 +1279,7 @@ Post-merge hard forks (timestamp based):
let no_block_fork_case = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(empty_genesis.clone())
- .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123))
+ .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123))
.build();
let no_block_fork_head = no_block_fork_case.satisfy(ForkCondition::Timestamp(11313123));
let no_block_fork_expected = Head { number: 0, timestamp: 11313123, ..Default::default() };
@@ -1906,16 +1291,16 @@ Post-merge hard forks (timestamp based):
let fork_cond_ttd_blocknum_case = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(empty_genesis.clone())
- .with_fork(Hardfork::Frontier, ForkCondition::Block(0))
- .with_fork(Hardfork::Homestead, ForkCondition::Block(73))
+ .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73))
.with_fork(
- Hardfork::Paris,
+ EthereumHardfork::Paris,
ForkCondition::TTD {
fork_block: Some(101),
total_difficulty: U256::from(10_790_000),
},
)
- .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123))
+ .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123))
.build();
let fork_cond_ttd_blocknum_head =
fork_cond_ttd_blocknum_case.satisfy(ForkCondition::Timestamp(11313123));
@@ -1932,8 +1317,8 @@ Post-merge hard forks (timestamp based):
let fork_cond_block_only_case = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(empty_genesis)
- .with_fork(Hardfork::Frontier, ForkCondition::Block(0))
- .with_fork(Hardfork::Homestead, ForkCondition::Block(73))
+ .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0))
+ .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73))
.build();
let fork_cond_block_only_head = fork_cond_block_only_case.satisfy(ForkCondition::Block(73));
let fork_cond_block_only_expected = Head { number: 73, ..Default::default() };
@@ -1961,117 +1346,69 @@ Post-merge hard forks (timestamp based):
&MAINNET,
&[
(
- Hardfork::Frontier,
+ EthereumHardfork::Frontier,
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
),
(
- Hardfork::Homestead,
+ EthereumHardfork::Homestead,
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
),
- (Hardfork::Dao, ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }),
(
- Hardfork::Tangerine,
+ EthereumHardfork::Dao,
+ ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
+ ),
+ (
+ EthereumHardfork::Tangerine,
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
),
(
- Hardfork::SpuriousDragon,
+ EthereumHardfork::SpuriousDragon,
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
),
(
- Hardfork::Byzantium,
+ EthereumHardfork::Byzantium,
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
),
(
- Hardfork::Constantinople,
+ EthereumHardfork::Constantinople,
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
),
(
- Hardfork::Petersburg,
+ EthereumHardfork::Petersburg,
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
),
(
- Hardfork::Istanbul,
+ EthereumHardfork::Istanbul,
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
),
(
- Hardfork::MuirGlacier,
+ EthereumHardfork::MuirGlacier,
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
),
(
- Hardfork::Berlin,
+ EthereumHardfork::Berlin,
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
),
(
- Hardfork::London,
+ EthereumHardfork::London,
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
),
(
- Hardfork::ArrowGlacier,
+ EthereumHardfork::ArrowGlacier,
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
),
(
- Hardfork::GrayGlacier,
+ EthereumHardfork::GrayGlacier,
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
),
(
- Hardfork::Shanghai,
+ EthereumHardfork::Shanghai,
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
),
- (Hardfork::Cancun, ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 0 }),
- ],
- );
- }
-
- #[test]
- fn goerli_hardfork_fork_ids() {
- test_hardfork_fork_ids(
- &GOERLI,
- &[
- (
- Hardfork::Frontier,
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
- (
- Hardfork::Homestead,
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
- (
- Hardfork::Tangerine,
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
- (
- Hardfork::SpuriousDragon,
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
- (
- Hardfork::Byzantium,
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
- (
- Hardfork::Constantinople,
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
- (
- Hardfork::Petersburg,
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
(
- Hardfork::Istanbul,
- ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 },
- ),
- (
- Hardfork::Berlin,
- ForkId { hash: ForkHash([0x75, 0x7a, 0x1c, 0x47]), next: 5062605 },
- ),
- (
- Hardfork::London,
- ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 },
- ),
- (
- Hardfork::Shanghai,
- ForkId { hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 },
+ EthereumHardfork::Cancun,
+ ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 0 },
),
- (Hardfork::Cancun, ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 }),
],
);
}
@@ -2082,54 +1419,57 @@ Post-merge hard forks (timestamp based):
&SEPOLIA,
&[
(
- Hardfork::Frontier,
+ EthereumHardfork::Frontier,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::Homestead,
+ EthereumHardfork::Homestead,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::Tangerine,
+ EthereumHardfork::Tangerine,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::SpuriousDragon,
+ EthereumHardfork::SpuriousDragon,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::Byzantium,
+ EthereumHardfork::Byzantium,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::Constantinople,
+ EthereumHardfork::Constantinople,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::Petersburg,
+ EthereumHardfork::Petersburg,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::Istanbul,
+ EthereumHardfork::Istanbul,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::Berlin,
+ EthereumHardfork::Berlin,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::London,
+ EthereumHardfork::London,
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
),
(
- Hardfork::Paris,
+ EthereumHardfork::Paris,
ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 },
),
(
- Hardfork::Shanghai,
+ EthereumHardfork::Shanghai,
ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 },
),
- (Hardfork::Cancun, ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 0 }),
+ (
+ EthereumHardfork::Cancun,
+ ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 0 },
+ ),
],
);
}
@@ -2248,63 +1588,6 @@ Post-merge hard forks (timestamp based):
)
}
- #[test]
- fn goerli_forkids() {
- test_fork_ids(
- &GOERLI,
- &[
- (
- Head { number: 0, ..Default::default() },
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
- (
- Head { number: 1561650, ..Default::default() },
- ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 },
- ),
- (
- Head { number: 1561651, ..Default::default() },
- ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 },
- ),
- (
- Head { number: 4460643, ..Default::default() },
- ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 },
- ),
- (
- Head { number: 4460644, ..Default::default() },
- ForkId { hash: ForkHash([0x75, 0x7a, 0x1c, 0x47]), next: 5062605 },
- ),
- (
- Head { number: 5062605, ..Default::default() },
- ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 },
- ),
- (
- Head { number: 6000000, timestamp: 1678832735, ..Default::default() },
- ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 },
- ),
- // First Shanghai block
- (
- Head { number: 6000001, timestamp: 1678832736, ..Default::default() },
- ForkId { hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 },
- ),
- // Future Shanghai block
- (
- Head { number: 6500002, timestamp: 1678832736, ..Default::default() },
- ForkId { hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 },
- ),
- // First Cancun block
- (
- Head { number: 6500003, timestamp: 1705473120, ..Default::default() },
- ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 },
- ),
- // Future Cancun block
- (
- Head { number: 6500003, timestamp: 2705473120, ..Default::default() },
- ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 },
- ),
- ],
- );
- }
-
#[test]
fn sepolia_forkids() {
test_fork_ids(
@@ -2630,8 +1913,8 @@ Post-merge hard forks (timestamp based):
cancun_time: u64,
) -> ChainSpec {
builder
- .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(shanghai_time))
- .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(cancun_time))
+ .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(shanghai_time))
+ .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(cancun_time))
.build()
}
@@ -2686,14 +1969,14 @@ Post-merge hard forks (timestamp based):
let terminal_block_ttd = U256::from(58750003716598352816469_u128);
let terminal_block_difficulty = U256::from(11055787484078698_u128);
assert!(!chainspec
- .fork(Hardfork::Paris)
+ .fork(EthereumHardfork::Paris)
.active_at_ttd(terminal_block_ttd, terminal_block_difficulty));
// Check that Paris is active on first PoS block #15537394.
let first_pos_block_ttd = U256::from(58750003716598352816469_u128);
let first_pos_difficulty = U256::ZERO;
assert!(chainspec
- .fork(Hardfork::Paris)
+ .fork(EthereumHardfork::Paris)
.active_at_ttd(first_pos_block_ttd, first_pos_difficulty));
}
@@ -2769,55 +2052,64 @@ Post-merge hard forks (timestamp based):
// assert a bunch of hardforks that should be set
assert_eq!(
- chainspec.hardforks.get(&Hardfork::Homestead).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::Homestead).unwrap(),
+ ForkCondition::Block(0)
);
assert_eq!(
- chainspec.hardforks.get(&Hardfork::Tangerine).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::Tangerine).unwrap(),
+ ForkCondition::Block(0)
);
assert_eq!(
- chainspec.hardforks.get(&Hardfork::SpuriousDragon).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::SpuriousDragon).unwrap(),
+ ForkCondition::Block(0)
);
assert_eq!(
- chainspec.hardforks.get(&Hardfork::Byzantium).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::Byzantium).unwrap(),
+ ForkCondition::Block(0)
);
assert_eq!(
- chainspec.hardforks.get(&Hardfork::Constantinople).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::Constantinople).unwrap(),
+ ForkCondition::Block(0)
);
assert_eq!(
- chainspec.hardforks.get(&Hardfork::Petersburg).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::Petersburg).unwrap(),
+ ForkCondition::Block(0)
);
- assert_eq!(chainspec.hardforks.get(&Hardfork::Istanbul).unwrap(), &ForkCondition::Block(0));
assert_eq!(
- chainspec.hardforks.get(&Hardfork::MuirGlacier).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::Istanbul).unwrap(),
+ ForkCondition::Block(0)
);
- assert_eq!(chainspec.hardforks.get(&Hardfork::Berlin).unwrap(), &ForkCondition::Block(0));
- assert_eq!(chainspec.hardforks.get(&Hardfork::London).unwrap(), &ForkCondition::Block(0));
assert_eq!(
- chainspec.hardforks.get(&Hardfork::ArrowGlacier).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::MuirGlacier).unwrap(),
+ ForkCondition::Block(0)
);
assert_eq!(
- chainspec.hardforks.get(&Hardfork::GrayGlacier).unwrap(),
- &ForkCondition::Block(0)
+ chainspec.hardforks.get(EthereumHardfork::Berlin).unwrap(),
+ ForkCondition::Block(0)
+ );
+ assert_eq!(
+ chainspec.hardforks.get(EthereumHardfork::London).unwrap(),
+ ForkCondition::Block(0)
+ );
+ assert_eq!(
+ chainspec.hardforks.get(EthereumHardfork::ArrowGlacier).unwrap(),
+ ForkCondition::Block(0)
+ );
+ assert_eq!(
+ chainspec.hardforks.get(EthereumHardfork::GrayGlacier).unwrap(),
+ ForkCondition::Block(0)
);
// including time based hardforks
assert_eq!(
- chainspec.hardforks.get(&Hardfork::Shanghai).unwrap(),
- &ForkCondition::Timestamp(0)
+ chainspec.hardforks.get(EthereumHardfork::Shanghai).unwrap(),
+ ForkCondition::Timestamp(0)
);
// including time based hardforks
assert_eq!(
- chainspec.hardforks.get(&Hardfork::Cancun).unwrap(),
- &ForkCondition::Timestamp(1)
+ chainspec.hardforks.get(EthereumHardfork::Cancun).unwrap(),
+ ForkCondition::Timestamp(1)
);
// alloc key -> expected rlp mapping
@@ -2829,10 +2121,7 @@ Post-merge hard forks (timestamp based):
for (key, expected_rlp) in key_rlp {
let account = chainspec.genesis.alloc.get(&key).expect("account should exist");
- assert_eq!(
- &alloy_rlp::encode(IntoTrieAccount::to_trie_account(account.clone())),
- expected_rlp
- );
+ assert_eq!(&alloy_rlp::encode(TrieAccount::from(account.clone())), expected_rlp);
}
assert_eq!(chainspec.genesis_hash, None);
@@ -2908,8 +2197,7 @@ Post-merge hard forks (timestamp based):
}
"#;
- let _genesis = serde_json::from_str::<Genesis>(hive_json).unwrap();
- let genesis = serde_json::from_str::<AllGenesisFormats>(hive_json).unwrap();
+ let genesis = serde_json::from_str::<Genesis>(hive_json).unwrap();
let chainspec: ChainSpec = genesis.into();
assert_eq!(chainspec.genesis_hash, None);
assert_eq!(chainspec.chain, Chain::from_named(NamedChain::Optimism));
@@ -2917,14 +2205,14 @@ Post-merge hard forks (timestamp based):
hex!("9a6049ac535e3dc7436c189eaa81c73f35abd7f282ab67c32944ff0301d63360").into();
assert_eq!(chainspec.genesis_header().state_root, expected_state_root);
let hard_forks = vec![
- Hardfork::Byzantium,
- Hardfork::Homestead,
- Hardfork::Istanbul,
- Hardfork::Petersburg,
- Hardfork::Constantinople,
+ EthereumHardfork::Byzantium,
+ EthereumHardfork::Homestead,
+ EthereumHardfork::Istanbul,
+ EthereumHardfork::Petersburg,
+ EthereumHardfork::Constantinople,
];
- for ref fork in hard_forks {
- assert_eq!(chainspec.hardforks.get(fork).unwrap(), &ForkCondition::Block(0));
+ for fork in hard_forks {
+ assert_eq!(chainspec.hardforks.get(fork).unwrap(), ForkCondition::Block(0));
}
let expected_hash: B256 =
@@ -3094,13 +2382,7 @@ Post-merge hard forks (timestamp based):
#[test]
fn test_parse_prague_genesis_all_formats() {
let s = r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0,"cancunTime":4661, "pragueTime": 4662},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x3b9aca00"}"#;
- let genesis: AllGenesisFormats = serde_json::from_str(s).unwrap();
-
- // this should be the genesis format
- let genesis = match genesis {
- AllGenesisFormats::Geth(genesis) => genesis,
- _ => panic!("expected geth genesis format"),
- };
+ let genesis: Genesis = serde_json::from_str(s).unwrap();
// assert that the alloc was picked up
let acc = genesis
@@ -3117,13 +2399,7 @@ Post-merge hard forks (timestamp based):
#[test]
fn test_parse_cancun_genesis_all_formats() {
let s = r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0,"cancunTime":4661},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x3b9aca00"}"#;
- let genesis: AllGenesisFormats = serde_json::from_str(s).unwrap();
-
- // this should be the genesis format
- let genesis = match genesis {
- AllGenesisFormats::Geth(genesis) => genesis,
- _ => panic!("expected geth genesis format"),
- };
+ let genesis: Genesis = serde_json::from_str(s).unwrap();
// assert that the alloc was picked up
let acc = genesis
@@ -3185,12 +2461,12 @@ Post-merge hard forks (timestamp based):
#[test]
fn holesky_paris_activated_at_genesis() {
assert!(HOLESKY
- .fork(Hardfork::Paris)
+ .fork(EthereumHardfork::Paris)
.active_at_ttd(HOLESKY.genesis.difficulty, HOLESKY.genesis.difficulty));
}
#[test]
- fn test_all_genesis_formats_deserialization() {
+ fn test_genesis_format_deserialization() {
// custom genesis with chain config
let config = ChainConfig {
chain_id: 2600,
@@ -3228,22 +2504,9 @@ Post-merge hard forks (timestamp based):
// ensure genesis is deserialized correctly
let serialized_genesis = serde_json::to_string(&genesis).unwrap();
- let deserialized_genesis: AllGenesisFormats =
- serde_json::from_str(&serialized_genesis).unwrap();
- assert!(matches!(deserialized_genesis, AllGenesisFormats::Geth(_)));
+ let deserialized_genesis: Genesis = serde_json::from_str(&serialized_genesis).unwrap();
- // build chain
- let chain_spec = ChainSpecBuilder::default()
- .chain(2600.into())
- .genesis(genesis)
- .cancun_activated()
- .build();
-
- // ensure chain spec is deserialized correctly
- let serialized_chain_spec = serde_json::to_string(&chain_spec).unwrap();
- let deserialized_chain_spec: AllGenesisFormats =
- serde_json::from_str(&serialized_chain_spec).unwrap();
- assert!(matches!(deserialized_chain_spec, AllGenesisFormats::Reth(_)))
+ assert_eq!(genesis, deserialized_genesis);
}
#[test]
@@ -3252,13 +2515,16 @@ Post-merge hard forks (timestamp based):
chain: Chain::mainnet(),
genesis: Genesis::default(),
genesis_hash: None,
- hardforks: BTreeMap::from([(Hardfork::Frontier, ForkCondition::Never)]),
+ hardforks: ChainHardforks::new(vec![(
+ EthereumHardfork::Frontier.boxed(),
+ ForkCondition::Never,
+ )]),
paris_block_and_final_difficulty: None,
deposit_contract: None,
..Default::default()
};
- assert_eq!(spec.hardfork_fork_id(Hardfork::Frontier), None);
+ assert_eq!(spec.hardfork_fork_id(EthereumHardfork::Frontier), None);
}
#[test]
@@ -3267,13 +2533,16 @@ Post-merge hard forks (timestamp based):
chain: Chain::mainnet(),
genesis: Genesis::default(),
genesis_hash: None,
- hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Never)]),
+ hardforks: ChainHardforks::new(vec![(
+ EthereumHardfork::Shanghai.boxed(),
+ ForkCondition::Never,
+ )]),
paris_block_and_final_difficulty: None,
deposit_contract: None,
..Default::default()
};
- assert_eq!(spec.hardfork_fork_filter(Hardfork::Shanghai), None);
+ assert_eq!(spec.hardfork_fork_filter(EthereumHardfork::Shanghai), None);
}
#[test]
@@ -3391,17 +2660,17 @@ Post-merge hard forks (timestamp based):
BaseFeeParamsKind::Constant(BaseFeeParams::new(70, 60))
);
- assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0));
- assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0));
- assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0));
- assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0));
- assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 0));
+ assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0));
+ assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0));
+ assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0));
+ assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0));
+ assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0));
- assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10));
- assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20));
- assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 30));
- assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 40));
- assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 50));
+ assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10));
+ assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20));
+ assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30));
+ assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40));
+ assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50));
}
#[cfg(feature = "optimism")]
@@ -3452,23 +2721,100 @@ Post-merge hard forks (timestamp based):
chain_spec.base_fee_params,
BaseFeeParamsKind::Variable(
vec![
- (Hardfork::London, BaseFeeParams::new(70, 60)),
- (Hardfork::Canyon, BaseFeeParams::new(80, 60)),
+ (EthereumHardfork::London.boxed(), BaseFeeParams::new(70, 60)),
+ (OptimismHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)),
]
.into()
)
);
- assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0));
- assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0));
- assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0));
- assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0));
- assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 0));
-
- assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10));
- assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20));
- assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 30));
- assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 40));
- assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 50));
+ assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0));
+ assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0));
+ assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0));
+ assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0));
+ assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0));
+
+ assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10));
+ assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20));
+ assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30));
+ assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40));
+ assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50));
+ }
+
+ #[cfg(feature = "optimism")]
+ #[test]
+ fn parse_genesis_optimism_with_variable_base_fee_params() {
+ use op_alloy_rpc_types::genesis::OptimismBaseFeeInfo;
+
+ let geth_genesis = r#"
+ {
+ "config": {
+ "chainId": 8453,
+ "homesteadBlock": 0,
+ "eip150Block": 0,
+ "eip155Block": 0,
+ "eip158Block": 0,
+ "byzantiumBlock": 0,
+ "constantinopleBlock": 0,
+ "petersburgBlock": 0,
+ "istanbulBlock": 0,
+ "muirGlacierBlock": 0,
+ "berlinBlock": 0,
+ "londonBlock": 0,
+ "arrowGlacierBlock": 0,
+ "grayGlacierBlock": 0,
+ "mergeNetsplitBlock": 0,
+ "bedrockBlock": 0,
+ "regolithTime": 15,
+ "terminalTotalDifficulty": 0,
+ "terminalTotalDifficultyPassed": true,
+ "optimism": {
+ "eip1559Elasticity": 6,
+ "eip1559Denominator": 50
+ }
+ }
+ }
+ "#;
+ let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap();
+ let chainspec = ChainSpec::from(genesis.clone());
+
+ let actual_chain_id = genesis.config.chain_id;
+ assert_eq!(actual_chain_id, 8453);
+
+ assert_eq!(
+ chainspec.hardforks.get(EthereumHardfork::Istanbul),
+ Some(ForkCondition::Block(0))
+ );
+
+ let actual_bedrock_block = genesis.config.extra_fields.get("bedrockBlock");
+ assert_eq!(actual_bedrock_block, Some(serde_json::Value::from(0)).as_ref());
+ let actual_canyon_timestamp = genesis.config.extra_fields.get("canyonTime");
+ assert_eq!(actual_canyon_timestamp, None);
+
+ assert!(genesis.config.terminal_total_difficulty_passed);
+
+ let optimism_object = genesis.config.extra_fields.get("optimism").unwrap();
+ let optimism_base_fee_info =
+ serde_json::from_value::<OptimismBaseFeeInfo>(optimism_object.clone()).unwrap();
+
+ assert_eq!(
+ optimism_base_fee_info,
+ OptimismBaseFeeInfo {
+ eip1559_elasticity: Some(6),
+ eip1559_denominator: Some(50),
+ eip1559_denominator_canyon: None,
+ }
+ );
+ assert_eq!(
+ chainspec.base_fee_params,
+ BaseFeeParamsKind::Constant(BaseFeeParams {
+ max_change_denominator: 50,
+ elasticity_multiplier: 6,
+ })
+ );
+
+ assert!(chainspec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0));
+
+ assert!(chainspec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20));
}
}
diff --git a/crates/cli/cli/Cargo.toml b/crates/cli/cli/Cargo.toml
new file mode 100644
index 000000000000..83ea9da6f9bb
--- /dev/null
+++ b/crates/cli/cli/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "reth-cli"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[lints]
+
+
+[dependencies]
+# reth
+reth-cli-runner.workspace = true
+reth-chainspec.workspace = true
+eyre.workspace = true
+
+# misc
+clap.workspace = true
diff --git a/crates/cli/cli/src/chainspec.rs b/crates/cli/cli/src/chainspec.rs
new file mode 100644
index 000000000000..4c1b4372fd0b
--- /dev/null
+++ b/crates/cli/cli/src/chainspec.rs
@@ -0,0 +1,25 @@
+use clap::builder::TypedValueParser;
+use reth_chainspec::ChainSpec;
+use std::sync::Arc;
+
+/// Trait for parsing chain specifications.
+///
+/// This trait extends [`clap::builder::TypedValueParser`] to provide a parser for chain
+/// specifications. Implementers of this trait must provide a list of supported chains and a
+/// function to parse a given string into a [`ChainSpec`].
+pub trait ChainSpecParser: TypedValueParser<Value = Arc<ChainSpec>> + Default {
+ /// List of supported chains.
+ const SUPPORTED_CHAINS: &'static [&'static str];
+
+ /// Parses the given string into a [`ChainSpec`].
+ ///
+ /// # Arguments
+ ///
+ /// * `s` - A string slice that holds the chain spec to be parsed.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the input string cannot be parsed into a valid
+ /// [`ChainSpec`].
+ fn parse(&self, s: &str) -> eyre::Result<Arc<ChainSpec>>;
+}
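
For context on how this trait composes with clap, here is a minimal sketch of a possible implementation. The `MainnetOnlyParser` type, its error mapping, and the `reth_chainspec::MAINNET` import path are assumptions for illustration, not part of this PR; only the clap 4 `TypedValueParser` API is relied on:

```rust
use clap::builder::TypedValueParser;
use reth_chainspec::{ChainSpec, MAINNET};
use std::{ffi::OsStr, sync::Arc};

/// Hypothetical parser that accepts only "mainnet".
#[derive(Clone, Debug, Default)]
struct MainnetOnlyParser;

impl TypedValueParser for MainnetOnlyParser {
    type Value = Arc<ChainSpec>;

    fn parse_ref(
        &self,
        _cmd: &clap::Command,
        _arg: Option<&clap::Arg>,
        value: &OsStr,
    ) -> Result<Self::Value, clap::Error> {
        let s = value
            .to_str()
            .ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
        // Delegate to the eyre-based parser and map failures to a clap error.
        ChainSpecParser::parse(self, s)
            .map_err(|_| clap::Error::new(clap::error::ErrorKind::InvalidValue))
    }
}

impl ChainSpecParser for MainnetOnlyParser {
    const SUPPORTED_CHAINS: &'static [&'static str] = &["mainnet"];

    fn parse(&self, s: &str) -> eyre::Result<Arc<ChainSpec>> {
        match s {
            "mainnet" => Ok(MAINNET.clone()),
            _ => eyre::bail!("unsupported chain: {s}"),
        }
    }
}
```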
diff --git a/crates/cli/cli/src/lib.rs b/crates/cli/cli/src/lib.rs
new file mode 100644
index 000000000000..9e078e82f221
--- /dev/null
+++ b/crates/cli/cli/src/lib.rs
@@ -0,0 +1,70 @@
+//! Cli abstraction for reth based nodes.
+
+#![doc(
+ html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+use std::{borrow::Cow, ffi::OsString};
+
+use reth_cli_runner::CliRunner;
+
+use clap::{Error, Parser};
+
+pub mod chainspec;
+
+/// Reth based node cli.
+///
+/// This trait is supposed to be implemented by the main struct of the CLI.
+///
+/// It provides commonly used functionality for running commands and information about the CLI, such
+/// as the name and version.
+pub trait RethCli: Sized {
+ /// The name of the implementation, e.g. `reth`, `op-reth`, etc.
+ fn name(&self) -> Cow<'static, str>;
+
+ /// The version of the node, such as `reth/v1.0.0`
+ fn version(&self) -> Cow<'static, str>;
+
+ /// Parse args from the iterator returned by [`std::env::args_os()`].
+ fn parse_args() -> Result<Self, Error>
+ where
+ Self: Parser + Sized,
+ {
+ <Self as Parser>::try_parse_from(std::env::args_os())
+ }
+
+ /// Parse args from the given iterator.
+ fn try_parse_from<I, T>(itr: I) -> Result<Self, Error>
+ where
+ Self: Parser + Sized,
+ I: IntoIterator<Item = T>,
+ T: Into<OsString> + Clone,
+ {
+ <Self as Parser>::try_parse_from(itr)
+ }
+
+ /// Executes a command.
+ fn with_runner<F, R>(self, f: F) -> R
+ where
+ F: FnOnce(Self, CliRunner) -> R,
+ {
+ let runner = CliRunner::default();
+
+ f(self, runner)
+ }
+
+ /// Parses and executes a command.
+ fn execute<F, R>(f: F) -> Result<R, Error>
+ where
+ Self: Parser + Sized,
+ F: FnOnce(Self, CliRunner) -> R,
+ {
+ let cli = Self::parse_args()?;
+
+ Ok(cli.with_runner(f))
+ }
+}
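
To show how a downstream binary might consume this trait, here is a hedged usage sketch. The `MyCli` struct, its flag, and the `"my-reth"` strings are invented for illustration; only the `RethCli` methods defined above are assumed:

```rust
use clap::Parser;
use reth_cli::RethCli;
use std::borrow::Cow;

/// Hypothetical CLI struct; any `clap::Parser` type works here.
#[derive(Debug, Parser)]
struct MyCli {
    /// Enable verbose output.
    #[arg(long)]
    verbose: bool,
}

impl RethCli for MyCli {
    fn name(&self) -> Cow<'static, str> {
        "my-reth".into()
    }

    fn version(&self) -> Cow<'static, str> {
        "my-reth/v0.0.0".into()
    }
}

fn main() -> Result<(), clap::Error> {
    // `execute` parses the args, builds a CliRunner, and hands both to the closure.
    MyCli::execute(|cli, _runner| {
        println!("starting {} {}", cli.name(), cli.version());
    })
}
```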
diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml
new file mode 100644
index 000000000000..1bb1a4e00e2f
--- /dev/null
+++ b/crates/cli/commands/Cargo.toml
@@ -0,0 +1,78 @@
+[package]
+name = "reth-cli-commands"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[lints]
+
+[dependencies]
+reth-beacon-consensus.workspace = true
+reth-chainspec.workspace = true
+reth-cli-runner.workspace = true
+reth-cli-util.workspace = true
+reth-config.workspace = true
+reth-consensus.workspace = true
+reth-db = { workspace = true, features = ["mdbx"] }
+reth-db-api.workspace = true
+reth-db-common.workspace = true
+reth-downloaders.workspace = true
+reth-evm.workspace = true
+reth-exex.workspace = true
+reth-fs-util.workspace = true
+reth-network = { workspace = true, features = ["serde"] }
+reth-network-p2p.workspace = true
+reth-node-core.workspace = true
+reth-primitives.workspace = true
+reth-provider.workspace = true
+reth-prune.workspace = true
+reth-stages.workspace = true
+reth-static-file-types.workspace = true
+reth-static-file.workspace = true
+reth-trie = { workspace = true, features = ["metrics"] }
+
+tokio.workspace = true
+itertools.workspace = true
+
+# misc
+ahash = "0.8"
+human_bytes = "0.4.1"
+eyre.workspace = true
+clap = { workspace = true, features = ["derive", "env"] }
+serde.workspace = true
+serde_json.workspace = true
+tracing.workspace = true
+backon.workspace = true
+
+# io
+fdlimit.workspace = true
+confy.workspace = true
+toml = { workspace = true, features = ["display"] }
+
+# tui
+comfy-table = "7.0"
+crossterm = "0.27.0"
+ratatui = { version = "0.27", default-features = false, features = [
+ "crossterm",
+] }
+
+# metrics
+metrics-process.workspace = true
+
+# reth test-vectors
+proptest = { workspace = true, optional = true }
+arbitrary = { workspace = true, optional = true }
+proptest-arbitrary-interop = { workspace = true, optional = true }
+
+[features]
+default = []
+dev = [
+ "dep:proptest",
+ "dep:arbitrary",
+ "dep:proptest-arbitrary-interop",
+ "reth-primitives/arbitrary",
+ "reth-db-api/arbitrary"
+]
diff --git a/bin/reth/src/commands/common.rs b/crates/cli/commands/src/common.rs
similarity index 88%
rename from bin/reth/src/commands/common.rs
rename to crates/cli/commands/src/common.rs
index be12fa3156db..ce733e938cfd 100644
--- a/bin/reth/src/commands/common.rs
+++ b/crates/cli/commands/src/common.rs
@@ -2,6 +2,7 @@
use clap::Parser;
use reth_beacon_consensus::EthBeaconConsensus;
+use reth_chainspec::ChainSpec;
use reth_config::{config::EtlConfig, Config};
use reth_db::{init_db, open_db_read_only, DatabaseEnv};
use reth_db_common::init::init_genesis;
@@ -9,18 +10,17 @@ use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHe
use reth_evm::noop::NoopBlockExecutorProvider;
use reth_node_core::{
args::{
- utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
+ utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS},
DatabaseArgs, DatadirArgs,
},
dirs::{ChainPath, DataDirPath},
};
-use reth_primitives::ChainSpec;
-use reth_provider::{
- providers::StaticFileProvider, HeaderSyncMode, ProviderFactory, StaticFileProviderFactory,
-};
+use reth_primitives::B256;
+use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory};
use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget};
use reth_static_file::StaticFileProducer;
use std::{path::PathBuf, sync::Arc};
+use tokio::sync::watch;
use tracing::{debug, info, warn};
/// Struct to hold config and datadir paths
@@ -42,7 +42,7 @@ pub struct EnvironmentArgs {
value_name = "CHAIN_OR_PATH",
long_help = chain_help(),
default_value = SUPPORTED_CHAINS[0],
- value_parser = genesis_value_parser
+ value_parser = chain_value_parser
)]
pub chain: Arc<ChainSpec>,
@@ -65,7 +65,11 @@ impl EnvironmentArgs {
}
let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
- let mut config: Config = confy::load_path(config_path).unwrap_or_default();
+ let mut config: Config = confy::load_path(config_path)
+ .inspect_err(
+ |err| warn!(target: "reth::cli", %err, "Failed to load config file, using default"),
+ )
+ .unwrap_or_default();
// Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
if config.stages.etl.dir.is_none() {
@@ -105,7 +109,10 @@ impl EnvironmentArgs {
static_file_provider: StaticFileProvider,
) -> eyre::Result<ProviderFactory<Arc<DatabaseEnv>>> {
let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning());
- let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider);
+ let prune_modes =
+ config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default();
+ let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider)
+ .with_prune_modes(prune_modes.clone());
info!(target: "reth::cli", "Verifying storage consistency.");
@@ -119,19 +126,19 @@ impl EnvironmentArgs {
return Ok(factory);
}
- let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default();
-
// Highly unlikely to happen, and given its destructive nature, it's better to panic
// instead.
assert_ne!(unwind_target, PipelineTarget::Unwind(0), "A static file <> database inconsistency was found that would trigger an unwind to block 0");
info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check.");
+ let (_tip_tx, tip_rx) = watch::channel(B256::ZERO);
+
// Builds and executes an unwind-only pipeline
let mut pipeline = Pipeline::builder()
.add_stages(DefaultStages::new(
factory.clone(),
- HeaderSyncMode::Continuous,
+ tip_rx,
Arc::new(EthBeaconConsensus::new(self.chain.clone())),
NoopHeaderDownloader::default(),
NoopBodiesDownloader::default(),
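Note the sync-target plumbing change in this hunk: `DefaultStages` no longer takes a `HeaderSyncMode` but a `tokio::sync::watch` receiver carrying the tip hash, and since an unwind-only pipeline never consults the tip, a channel pinned to `B256::ZERO` suffices. A self-contained sketch of the pattern (plain tokio, with a byte array standing in for `B256`):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    // the pipeline holds the receiver; the initial value is marked as seen
    let (tip_tx, mut tip_rx) = watch::channel([0u8; 32]); // stand-in for B256::ZERO

    tokio::spawn(async move {
        // e.g. a consensus engine announcing a new sync target
        let _ = tip_tx.send([1u8; 32]);
    });

    // the pipeline side wakes up only when the target actually changes
    if tip_rx.changed().await.is_ok() {
        println!("new tip: {:?}", *tip_rx.borrow());
    }
}
```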
diff --git a/bin/reth/src/commands/config_cmd.rs b/crates/cli/commands/src/config_cmd.rs
similarity index 100%
rename from bin/reth/src/commands/config_cmd.rs
rename to crates/cli/commands/src/config_cmd.rs
diff --git a/bin/reth/src/commands/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs
similarity index 97%
rename from bin/reth/src/commands/db/checksum.rs
rename to crates/cli/commands/src/db/checksum.rs
index b0dbb1f7732b..766f69041587 100644
--- a/bin/reth/src/commands/db/checksum.rs
+++ b/crates/cli/commands/src/db/checksum.rs
@@ -1,11 +1,9 @@
-use crate::{
- commands::db::get::{maybe_json_value_parser, table_key},
- utils::DbTool,
-};
+use crate::db::get::{maybe_json_value_parser, table_key};
use ahash::RandomState;
use clap::Parser;
use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables};
use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx};
+use reth_db_common::DbTool;
use std::{
hash::{BuildHasher, Hasher},
sync::Arc,
diff --git a/bin/reth/src/commands/db/clear.rs b/crates/cli/commands/src/db/clear.rs
similarity index 96%
rename from bin/reth/src/commands/db/clear.rs
rename to crates/cli/commands/src/db/clear.rs
index 76c1b97e38ad..b9edf458d3f4 100644
--- a/bin/reth/src/commands/db/clear.rs
+++ b/crates/cli/commands/src/db/clear.rs
@@ -5,8 +5,8 @@ use reth_db_api::{
table::Table,
transaction::{DbTx, DbTxMut},
};
-use reth_primitives::{static_file::find_fixed_range, StaticFileSegment};
use reth_provider::{ProviderFactory, StaticFileProviderFactory};
+use reth_static_file_types::{find_fixed_range, StaticFileSegment};
/// The arguments for the `reth db clear` command
#[derive(Parser, Debug)]
diff --git a/bin/reth/src/commands/db/diff.rs b/crates/cli/commands/src/db/diff.rs
similarity index 99%
rename from bin/reth/src/commands/db/diff.rs
rename to crates/cli/commands/src/db/diff.rs
index fda004f3c34c..41c3ab0e911c 100644
--- a/bin/reth/src/commands/db/diff.rs
+++ b/crates/cli/commands/src/db/diff.rs
@@ -1,11 +1,11 @@
-use crate::{
- args::DatabaseArgs,
- dirs::{DataDirPath, PlatformPath},
- utils::DbTool,
-};
use clap::Parser;
use reth_db::{open_db_read_only, tables_to_generic, DatabaseEnv, Tables};
use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx};
+use reth_db_common::DbTool;
+use reth_node_core::{
+ args::DatabaseArgs,
+ dirs::{DataDirPath, PlatformPath},
+};
use std::{
collections::HashMap,
fmt::Debug,
diff --git a/bin/reth/src/commands/db/get.rs b/crates/cli/commands/src/db/get.rs
similarity index 98%
rename from bin/reth/src/commands/db/get.rs
rename to crates/cli/commands/src/db/get.rs
index 699a31471802..cd721a1db4b1 100644
--- a/bin/reth/src/commands/db/get.rs
+++ b/crates/cli/commands/src/db/get.rs
@@ -1,4 +1,3 @@
-use crate::utils::DbTool;
use clap::Parser;
use reth_db::{
static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask},
@@ -8,8 +7,10 @@ use reth_db_api::{
database::Database,
table::{Decompress, DupSort, Table},
};
-use reth_primitives::{BlockHash, Header, StaticFileSegment};
+use reth_db_common::DbTool;
+use reth_primitives::{BlockHash, Header};
use reth_provider::StaticFileProviderFactory;
+use reth_static_file_types::StaticFileSegment;
use tracing::error;
/// The arguments for the `reth db get` command
diff --git a/bin/reth/src/commands/db/list.rs b/crates/cli/commands/src/db/list.rs
similarity index 99%
rename from bin/reth/src/commands/db/list.rs
rename to crates/cli/commands/src/db/list.rs
index 4689bbfdc0fa..59b3397154af 100644
--- a/bin/reth/src/commands/db/list.rs
+++ b/crates/cli/commands/src/db/list.rs
@@ -1,9 +1,9 @@
use super::tui::DbListTUI;
-use crate::utils::{DbTool, ListFilter};
use clap::Parser;
use eyre::WrapErr;
use reth_db::{DatabaseEnv, RawValue, TableViewer, Tables};
use reth_db_api::{database::Database, table::Table};
+use reth_db_common::{DbTool, ListFilter};
use reth_primitives::hex;
use std::{cell::RefCell, sync::Arc};
use tracing::error;
diff --git a/bin/reth/src/commands/db/mod.rs b/crates/cli/commands/src/db/mod.rs
similarity index 92%
rename from bin/reth/src/commands/db/mod.rs
rename to crates/cli/commands/src/db/mod.rs
index b4e4ded41aed..cba32fa5e55b 100644
--- a/bin/reth/src/commands/db/mod.rs
+++ b/crates/cli/commands/src/db/mod.rs
@@ -1,11 +1,7 @@
-//! Database debugging tool
-
-use crate::{
- commands::common::{AccessRights, Environment, EnvironmentArgs},
- utils::DbTool,
-};
+use crate::common::{AccessRights, Environment, EnvironmentArgs};
use clap::{Parser, Subcommand};
use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION};
+use reth_db_common::DbTool;
use std::io::{self, Write};
mod checksum;
@@ -71,6 +67,16 @@ impl Command {
let db_path = data_dir.db();
let static_files_path = data_dir.static_files();
+ // ensure the provided datadir exists
+ eyre::ensure!(
+ data_dir.data_dir().is_dir(),
+ "Datadir does not exist: {:?}",
+ data_dir.data_dir()
+ );
+
+ // ensure the provided database exists
+ eyre::ensure!(db_path.is_dir(), "Database does not exist: {:?}", db_path);
+
match self.command {
// TODO: We'll need to add this on the DB trait.
Subcommands::Stats(command) => {
diff --git a/bin/reth/src/commands/db/stats.rs b/crates/cli/commands/src/db/stats.rs
similarity index 98%
rename from bin/reth/src/commands/db/stats.rs
rename to crates/cli/commands/src/db/stats.rs
index 517b9c9e591f..37f7d617ba47 100644
--- a/bin/reth/src/commands/db/stats.rs
+++ b/crates/cli/commands/src/db/stats.rs
@@ -1,4 +1,4 @@
-use crate::{commands::db::checksum::ChecksumViewer, utils::DbTool};
+use crate::db::checksum::ChecksumViewer;
use clap::Parser;
use comfy_table::{Cell, Row, Table as ComfyTable};
use eyre::WrapErr;
@@ -6,10 +6,11 @@ use human_bytes::human_bytes;
use itertools::Itertools;
use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Tables};
use reth_db_api::database::Database;
+use reth_db_common::DbTool;
use reth_fs_util as fs;
use reth_node_core::dirs::{ChainPath, DataDirPath};
-use reth_primitives::static_file::{find_fixed_range, SegmentRangeInclusive};
use reth_provider::providers::StaticFileProvider;
+use reth_static_file_types::{find_fixed_range, SegmentRangeInclusive};
use std::{sync::Arc, time::Duration};
#[derive(Parser, Debug)]
diff --git a/bin/reth/src/commands/db/tui.rs b/crates/cli/commands/src/db/tui.rs
similarity index 100%
rename from bin/reth/src/commands/db/tui.rs
rename to crates/cli/commands/src/db/tui.rs
diff --git a/bin/reth/src/commands/dump_genesis.rs b/crates/cli/commands/src/dump_genesis.rs
similarity index 84%
rename from bin/reth/src/commands/dump_genesis.rs
rename to crates/cli/commands/src/dump_genesis.rs
index 843d3d18a64b..ae425ca8c29d 100644
--- a/bin/reth/src/commands/dump_genesis.rs
+++ b/crates/cli/commands/src/dump_genesis.rs
@@ -1,7 +1,7 @@
//! Command that dumps genesis block JSON configuration to stdout
-use crate::args::utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS};
use clap::Parser;
-use reth_primitives::ChainSpec;
+use reth_chainspec::ChainSpec;
+use reth_node_core::args::utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS};
use std::sync::Arc;
/// Dumps genesis block JSON configuration to stdout
@@ -15,7 +15,7 @@ pub struct DumpGenesisCommand {
value_name = "CHAIN_OR_PATH",
long_help = chain_help(),
default_value = SUPPORTED_CHAINS[0],
- value_parser = genesis_value_parser
+ value_parser = chain_value_parser
)]
chain: Arc<ChainSpec>,
}
@@ -39,7 +39,7 @@ mod tests {
DumpGenesisCommand::parse_from(["reth", "--chain", chain]);
assert_eq!(
Ok(args.chain.chain),
- chain.parse::<reth_primitives::Chain>(),
+ chain.parse::<reth_chainspec::Chain>(),
"failed to parse chain {chain}"
);
}
diff --git a/bin/reth/src/commands/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs
similarity index 91%
rename from bin/reth/src/commands/init_cmd.rs
rename to crates/cli/commands/src/init_cmd.rs
index 22657f0c0255..933527cc565a 100644
--- a/bin/reth/src/commands/init_cmd.rs
+++ b/crates/cli/commands/src/init_cmd.rs
@@ -1,6 +1,6 @@
//! Command that initializes the node from a genesis file.
-use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
+use crate::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use reth_provider::BlockHashReader;
use tracing::info;
diff --git a/bin/reth/src/commands/init_state.rs b/crates/cli/commands/src/init_state.rs
similarity index 96%
rename from bin/reth/src/commands/init_state.rs
rename to crates/cli/commands/src/init_state.rs
index dbf45e5816a6..af26d15e0176 100644
--- a/bin/reth/src/commands/init_state.rs
+++ b/crates/cli/commands/src/init_state.rs
@@ -1,6 +1,6 @@
//! Command that initializes the node from a genesis file.
-use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
+use crate::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use reth_config::config::EtlConfig;
use reth_db_api::database::Database;
diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs
new file mode 100644
index 000000000000..16767544e7ca
--- /dev/null
+++ b/crates/cli/commands/src/lib.rs
@@ -0,0 +1,22 @@
+//! Commonly used reth CLI commands.
+
+#![doc(
+ html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+pub mod common;
+pub mod config_cmd;
+pub mod db;
+pub mod dump_genesis;
+pub mod init_cmd;
+pub mod init_state;
+pub mod p2p;
+pub mod prune;
+pub mod recover;
+pub mod stage;
+#[cfg(feature = "dev")]
+pub mod test_vectors;
diff --git a/bin/reth/src/commands/p2p/mod.rs b/crates/cli/commands/src/p2p.rs
similarity index 60%
rename from bin/reth/src/commands/p2p/mod.rs
rename to crates/cli/commands/src/p2p.rs
index b57a2f07aaba..0fdefac8bd88 100644
--- a/bin/reth/src/commands/p2p/mod.rs
+++ b/crates/cli/commands/src/p2p.rs
@@ -1,28 +1,21 @@
//! P2P Debugging tool
-use crate::{
- args::{
- get_secret_key,
- utils::{chain_help, chain_spec_value_parser, hash_or_num_value_parser, SUPPORTED_CHAINS},
- DatabaseArgs, DiscoveryArgs, NetworkArgs,
- },
- utils::get_single_header,
-};
use backon::{ConstantBuilder, Retryable};
use clap::{Parser, Subcommand};
-use discv5::ListenConfig;
+use reth_chainspec::ChainSpec;
+use reth_cli_util::{get_secret_key, hash_or_num_value_parser};
use reth_config::Config;
-use reth_db::create_db;
use reth_network::NetworkConfigBuilder;
use reth_network_p2p::bodies::client::BodiesClient;
-use reth_node_core::args::DatadirArgs;
-use reth_primitives::{BlockHashOrNumber, ChainSpec};
-use reth_provider::{providers::StaticFileProvider, ProviderFactory};
-use std::{
- net::{IpAddr, SocketAddrV4, SocketAddrV6},
- path::PathBuf,
- sync::Arc,
+use reth_node_core::{
+ args::{
+ utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS},
+ DatabaseArgs, DatadirArgs, NetworkArgs,
+ },
+ utils::get_single_header,
};
+use reth_primitives::BlockHashOrNumber;
+use std::{path::PathBuf, sync::Arc};
/// `reth p2p` command
#[derive(Debug, Parser)]
@@ -39,7 +32,7 @@ pub struct Command {
value_name = "CHAIN_OR_PATH",
long_help = chain_help(),
default_value = SUPPORTED_CHAINS[0],
- value_parser = chain_spec_value_parser
+ value_parser = chain_value_parser
)]
chain: Arc<ChainSpec>,
@@ -79,18 +72,12 @@ pub enum Subcommands {
impl Command {
/// Execute `p2p` command
pub async fn execute(&self) -> eyre::Result<()> {
- let tempdir = tempfile::TempDir::new()?;
- let noop_db = Arc::new(create_db(tempdir.into_path(), self.db.database_args())?);
-
- // add network name to data dir
let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain);
let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
let mut config: Config = confy::load_path(&config_path).unwrap_or_default();
- for peer in &self.network.trusted_peers {
- config.peers.trusted_nodes.insert(peer.resolve().await?);
- }
+ config.peers.trusted_nodes.extend(self.network.resolve_trusted_peers().await?);
if config.peers.trusted_nodes.is_empty() && self.network.trusted_only {
eyre::bail!("No trusted nodes. Set trusted peer with `--trusted-peer ` or set `--trusted-only` to `false`")
@@ -105,62 +92,20 @@ impl Command {
let rlpx_socket = (self.network.addr, self.network.port).into();
let boot_nodes = self.chain.bootnodes().unwrap_or_default();
- let network = NetworkConfigBuilder::new(p2p_secret_key)
+ let net = NetworkConfigBuilder::new(p2p_secret_key)
.peer_config(config.peers_config_with_basic_nodes_from_file(None))
.external_ip_resolver(self.network.nat)
.chain_spec(self.chain.clone())
.disable_discv4_discovery_if(self.chain.chain.is_optimism())
.boot_nodes(boot_nodes.clone())
.apply(|builder| {
- self.network
- .discovery
- .apply_to_builder(builder, rlpx_socket)
- .map_discv5_config_builder(|builder| {
- let DiscoveryArgs {
- discv5_addr,
- discv5_addr_ipv6,
- discv5_port,
- discv5_port_ipv6,
- discv5_lookup_interval,
- discv5_bootstrap_lookup_interval,
- discv5_bootstrap_lookup_countdown,
- ..
- } = self.network.discovery;
-
- // Use rlpx address if none given
- let discv5_addr_ipv4 = discv5_addr.or(match self.network.addr {
- IpAddr::V4(ip) => Some(ip),
- IpAddr::V6(_) => None,
- });
- let discv5_addr_ipv6 = discv5_addr_ipv6.or(match self.network.addr {
- IpAddr::V4(_) => None,
- IpAddr::V6(ip) => Some(ip),
- });
-
- builder
- .discv5_config(
- discv5::ConfigBuilder::new(ListenConfig::from_two_sockets(
- discv5_addr_ipv4
- .map(|addr| SocketAddrV4::new(addr, discv5_port)),
- discv5_addr_ipv6.map(|addr| {
- SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)
- }),
- ))
- .build(),
- )
- .add_unsigned_boot_nodes(boot_nodes.into_iter())
- .lookup_interval(discv5_lookup_interval)
- .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval)
- .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown)
- })
+ self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes)
})
- .build(Arc::new(ProviderFactory::new(
- noop_db,
- self.chain.clone(),
- StaticFileProvider::read_write(data_dir.static_files())?,
- )))
- .start_network()
+ .build_with_noop_provider()
+ .manager()
.await?;
+ let network = net.handle().clone();
+ tokio::task::spawn(net);
let fetch_client = network.fetch_client().await?;
let retries = self.retries.max(1);
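The refactor above splits the network into a manager future that is spawned onto the runtime and a cloneable handle used for queries (`net.handle().clone()` followed by `tokio::task::spawn(net)`). A generic sketch of that manager/handle shape in plain tokio, not the reth API:

```rust
use tokio::sync::{mpsc, oneshot};

struct Request {
    reply: oneshot::Sender<u64>,
}

// the long-lived future: owns the I/O and serves requests
struct Manager {
    rx: mpsc::Receiver<Request>,
}

// the cheap, cloneable handle: sends requests to the manager
#[derive(Clone)]
struct Handle {
    tx: mpsc::Sender<Request>,
}

impl Manager {
    async fn run(mut self) {
        while let Some(req) = self.rx.recv().await {
            let _ = req.reply.send(42); // answer the query
        }
    }
}

impl Handle {
    async fn query(&self) -> u64 {
        let (reply, rx) = oneshot::channel();
        self.tx.send(Request { reply }).await.expect("manager alive");
        rx.await.expect("manager replied")
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(8);
    let (manager, handle) = (Manager { rx }, Handle { tx });
    tokio::task::spawn(manager.run()); // like `tokio::task::spawn(net)`
    assert_eq!(handle.query().await, 42); // like `network.fetch_client()`
}
```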
diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs
new file mode 100644
index 000000000000..6cc5e033bc04
--- /dev/null
+++ b/crates/cli/commands/src/prune.rs
@@ -0,0 +1,42 @@
+//! Command that runs pruning without any limits.
+use crate::common::{AccessRights, Environment, EnvironmentArgs};
+use clap::Parser;
+use reth_prune::PrunerBuilder;
+use reth_static_file::StaticFileProducer;
+use tracing::info;
+
+/// Prunes according to the configuration without any limits
+#[derive(Debug, Parser)]
+pub struct PruneCommand {
+ #[command(flatten)]
+ env: EnvironmentArgs,
+}
+
+impl PruneCommand {
+ /// Execute the `prune` command
+ pub async fn execute(self) -> eyre::Result<()> {
+ let Environment { config, provider_factory, .. } = self.env.init(AccessRights::RW)?;
+ let prune_config = config.prune.unwrap_or_default();
+
+ // Copy data from database to static files
+ info!(target: "reth::cli", "Copying data from database to static files...");
+ let static_file_producer =
+ StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone());
+ let lowest_static_file_height = static_file_producer.lock().copy_to_static_files()?.min();
+ info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files");
+
+ // Delete data which has been copied to static files.
+ if let Some(prune_tip) = lowest_static_file_height {
+ info!(target: "reth::cli", ?prune_tip, ?prune_config, "Pruning data from database...");
+ // Run the pruner according to the configuration, and don't enforce any limits on it
+ let mut pruner = PrunerBuilder::new(prune_config)
+ .prune_delete_limit(usize::MAX)
+ .build(provider_factory);
+
+ pruner.run(prune_tip)?;
+ info!(target: "reth::cli", "Pruned data from database");
+ }
+
+ Ok(())
+ }
+}
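The command's flow is two-phase: first copy every prunable segment from the database into static files, then let the pruner delete everything at or below the lowest height that was copied, with no per-run delete limit. A hypothetical standalone driver for it (names and defaults assumed, not part of this diff):

```rust
// Hypothetical: run PruneCommand directly, relying on the defaults
// that EnvironmentArgs provides (e.g. default chain and datadir).
use clap::Parser;

#[tokio::main]
async fn main() -> eyre::Result<()> {
    // equivalent to invoking `reth prune` with no extra flags
    let cmd = PruneCommand::parse_from(["reth-prune"]);
    cmd.execute().await
}
```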
diff --git a/bin/reth/src/commands/recover/mod.rs b/crates/cli/commands/src/recover/mod.rs
similarity index 100%
rename from bin/reth/src/commands/recover/mod.rs
rename to crates/cli/commands/src/recover/mod.rs
diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs
similarity index 96%
rename from bin/reth/src/commands/recover/storage_tries.rs
rename to crates/cli/commands/src/recover/storage_tries.rs
index b1dbbfa88ce5..2b4087144805 100644
--- a/bin/reth/src/commands/recover/storage_tries.rs
+++ b/crates/cli/commands/src/recover/storage_tries.rs
@@ -1,4 +1,4 @@
-use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
+use crate::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use reth_cli_runner::CliContext;
use reth_db::tables;
diff --git a/bin/reth/src/commands/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs
similarity index 96%
rename from bin/reth/src/commands/stage/drop.rs
rename to crates/cli/commands/src/stage/drop.rs
index 8297eafef81a..8278185df09a 100644
--- a/bin/reth/src/commands/stage/drop.rs
+++ b/crates/cli/commands/src/stage/drop.rs
@@ -1,18 +1,17 @@
//! Database debugging tool
-
-use crate::{
- args::StageEnum,
- commands::common::{AccessRights, Environment, EnvironmentArgs},
- utils::DbTool,
-};
+use crate::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use itertools::Itertools;
use reth_db::{static_file::iter_static_files, tables, DatabaseEnv};
use reth_db_api::transaction::DbTxMut;
-use reth_db_common::init::{insert_genesis_header, insert_genesis_history, insert_genesis_state};
-use reth_primitives::{static_file::find_fixed_range, StaticFileSegment};
+use reth_db_common::{
+ init::{insert_genesis_header, insert_genesis_history, insert_genesis_state},
+ DbTool,
+};
+use reth_node_core::args::StageEnum;
use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory};
use reth_stages::StageId;
+use reth_static_file_types::{find_fixed_range, StaticFileSegment};
/// `reth drop-stage` command
#[derive(Debug, Parser)]
diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs
similarity index 88%
rename from bin/reth/src/commands/stage/dump/execution.rs
rename to crates/cli/commands/src/stage/dump/execution.rs
index b6d6721dcf8d..61fc5e41ceff 100644
--- a/bin/reth/src/commands/stage/dump/execution.rs
+++ b/crates/cli/commands/src/stage/dump/execution.rs
@@ -1,26 +1,32 @@
use super::setup;
-use crate::{macros::block_executor, utils::DbTool};
use reth_db::{tables, DatabaseEnv};
use reth_db_api::{
cursor::DbCursorRO, database::Database, table::TableImporter, transaction::DbTx,
};
+use reth_db_common::DbTool;
+use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider};
use reth_node_core::dirs::{ChainPath, DataDirPath};
-use reth_provider::{providers::StaticFileProvider, ChainSpecProvider, ProviderFactory};
+use reth_provider::{providers::StaticFileProvider, ProviderFactory};
use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput};
use tracing::info;
-pub(crate) async fn dump_execution_stage<DB: Database>(
+pub(crate) async fn dump_execution_stage<DB: Database, E>(
 db_tool: &DbTool<DB>,
from: u64,
to: u64,
output_datadir: ChainPath<DataDirPath>,