From a2027c817f346c35865d82dfe1df26da77cc4a9b Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 22 Apr 2024 13:38:24 -0400 Subject: [PATCH 001/250] chore: add network or chain question to bug template (#7800) --- .github/ISSUE_TEMPLATE/bug.yml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index bfb81f1b7..1142a5bf2 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -6,7 +6,7 @@ body: attributes: value: | Thanks for taking the time to fill out this bug report! Please provide as much detail as possible. - + If you believe you have found a vulnerability, please provide details [here](mailto:georgios@paradigm.xyz) instead. - type: textarea id: what-happened @@ -14,7 +14,7 @@ body: label: Describe the bug description: | A clear and concise description of what the bug is. - + If the bug is in a crate you are using (i.e. you are not running the standard `reth` binary) please mention that as well. validations: required: true @@ -25,7 +25,7 @@ body: description: Please provide any steps you think might be relevant to reproduce the bug. placeholder: | Steps to reproduce: - + 1. Start '...' 2. Then '...' 3. Check '...' @@ -76,6 +76,13 @@ body: description: This can be obtained with `reth db version` validations: required: true + - type: textarea + id: network + attributes: + label: Which chain / network are you on? + description: This is the argument you pass to `reth --chain`. If you are using `--dev`, type in 'dev' here. If you are not running with `--chain` or `--dev` then it is mainnet. 
+ validations: + required: true - type: dropdown id: node-type attributes: From b9db4cb61bbe0540be5a72cbf31dcb0813994113 Mon Sep 17 00:00:00 2001 From: jn Date: Tue, 23 Apr 2024 02:15:53 -0700 Subject: [PATCH 002/250] Implement Compact for Withdrawal (#7604) Co-authored-by: Matthias Seitz --- crates/storage/codecs/Cargo.toml | 9 ++- crates/storage/codecs/src/alloy/mod.rs | 1 + crates/storage/codecs/src/alloy/withdrawal.rs | 62 +++++++++++++++++++ 3 files changed, 69 insertions(+), 3 deletions(-) create mode 100644 crates/storage/codecs/src/alloy/withdrawal.rs diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 31f954f86..ab8f1a323 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -11,18 +11,21 @@ repository.workspace = true workspace = true [dependencies] +# reth reth-codecs-derive = { path = "./derive", default-features = false } +# eth alloy-eips = { workspace = true, optional = true } alloy-primitives.workspace = true +# misc bytes.workspace = true +modular-bitfield = { workspace = true, optional = true } +serde.workspace = true [dev-dependencies] alloy-eips = { workspace = true, default-features = false, features = ["arbitrary", "serde"] } alloy-primitives = { workspace = true, features = ["arbitrary", "serde"] } -serde.workspace = true -modular-bitfield.workspace = true test-fuzz.workspace = true serde_json.workspace = true @@ -33,5 +36,5 @@ proptest-derive.workspace = true [features] default = ["std", "alloy"] std = ["alloy-primitives/std", "bytes/std"] -alloy = ["alloy-eips"] +alloy = ["dep:alloy-eips", "dep:modular-bitfield"] optimism = ["reth-codecs-derive/optimism"] diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 7d7a794fe..aff164642 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,3 +1,4 @@ mod access_list; mod log; mod txkind; +mod withdrawal; diff --git 
a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs new file mode 100644 index 000000000..0849b7e4a --- /dev/null +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -0,0 +1,62 @@ +use crate::Compact; +use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; +use alloy_primitives::Address; +use reth_codecs_derive::main_codec; + +/// Withdrawal acts as bridge which simplifies Compact implementation for AlloyWithdrawal. +/// +/// Notice: Make sure this struct is 1:1 with `alloy_eips::eip4895::Withdrawal` +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct Withdrawal { + /// Monotonically increasing identifier issued by consensus layer. + index: u64, + /// Index of validator associated with withdrawal. + validator_index: u64, + /// Target address for withdrawn ether. + address: Address, + /// Value of the withdrawal in gwei. + amount: u64, +} + +impl Compact for AlloyWithdrawal { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let withdrawal = Withdrawal { + index: self.index, + validator_index: self.validator_index, + address: self.address, + amount: self.amount, + }; + withdrawal.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (withdrawal, _) = Withdrawal::from_compact(buf, len); + let alloy_withdrawal = AlloyWithdrawal { + index: withdrawal.index, + validator_index: withdrawal.validator_index, + address: withdrawal.address, + amount: withdrawal.amount, + }; + (alloy_withdrawal, buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use proptest::proptest; + + proptest! 
{ + #[test] + fn roundtrip(withdrawal: AlloyWithdrawal) { + let mut compacted_withdrawal = Vec::::new(); + let len = withdrawal.to_compact(&mut compacted_withdrawal); + let (decoded, _) = AlloyWithdrawal::from_compact(&compacted_withdrawal, len); + assert_eq!(withdrawal, decoded) + } + } +} From c499797a6c959af2f2519ca97171ea6fd229a5dc Mon Sep 17 00:00:00 2001 From: Vid Kersic <38610409+Vid201@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:25:42 +0200 Subject: [PATCH 003/250] chore: export tables macro (#7807) --- crates/storage/db/src/lib.rs | 2 +- crates/storage/db/src/tables/mod.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index c0737cc42..5425c8074 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -38,7 +38,7 @@ //! //! # Overview //! -//! An overview of the current data model of reth can be found in the [`tables`] module. +//! An overview of the current data model of reth can be found in the [`mod@tables`] module. //! //! [`Database`]: crate::abstraction::database::Database //! [`DbTx`]: crate::abstraction::transaction::DbTx diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 676ed5ebc..b10662325 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -101,6 +101,7 @@ pub trait TableViewer { } } +#[macro_export] /// Defines all the tables in the database. macro_rules! 
tables { (@bool) => { false }; From d6b861ea5d068c17baf160e1fd1ca37cdbe154cc Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 23 Apr 2024 11:05:46 +0100 Subject: [PATCH 004/250] feat(exex): send `ExExNotification` instead of `CanonStateNotification` (#7803) --- Cargo.lock | 2 +- crates/exex/src/context.rs | 10 ++-- crates/exex/src/lib.rs | 3 ++ crates/exex/src/manager.rs | 58 +++++++++++---------- crates/exex/src/notification.rs | 54 +++++++++++++++++++ crates/node-builder/src/builder.rs | 2 +- crates/stages/src/stages/execution.rs | 11 ++-- examples/exex/minimal/Cargo.toml | 2 +- examples/exex/minimal/src/main.rs | 23 +++++---- examples/exex/op-bridge/src/main.rs | 74 ++++++++++++++------------- 10 files changed, 149 insertions(+), 90 deletions(-) create mode 100644 crates/exex/src/notification.rs diff --git a/Cargo.lock b/Cargo.lock index dfb64fdd7..d7effc49c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4723,7 +4723,7 @@ dependencies = [ "reth-node-core", "reth-node-ethereum", "reth-primitives", - "reth-provider", + "reth-tracing", "tokio", ] diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index 619679e85..df2b51377 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -4,11 +4,10 @@ use reth_node_core::{ node_config::NodeConfig, }; use reth_primitives::Head; -use reth_provider::CanonStateNotification; use reth_tasks::TaskExecutor; use tokio::sync::mpsc::{Receiver, UnboundedSender}; -use crate::ExExEvent; +use crate::{ExExEvent, ExExNotification}; /// Captures the context that an ExEx has access to. #[derive(Debug)] @@ -35,12 +34,11 @@ pub struct ExExContext { /// Additionally, the exex can pre-emptively emit a `FinishedHeight` event to specify what /// blocks to receive notifications for. pub events: UnboundedSender, - /// Channel to receive [`CanonStateNotification`]s on state transitions. + /// Channel to receive [`ExExNotification`]s. 
/// /// # Important /// - /// Once a `CanonStateNotification` is sent over the channel, it is considered delivered by the + /// Once a an [`ExExNotification`] is sent over the channel, it is considered delivered by the /// node. - pub notifications: Receiver, - // TODO(alexey): add pool, payload builder, anything else? + pub notifications: Receiver, } diff --git a/crates/exex/src/lib.rs b/crates/exex/src/lib.rs index 638d8af79..4e2d0dd85 100644 --- a/crates/exex/src/lib.rs +++ b/crates/exex/src/lib.rs @@ -42,3 +42,6 @@ pub use event::*; mod manager; pub use manager::*; + +mod notification; +pub use notification::*; diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 59f2bde58..95b950f32 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -1,8 +1,7 @@ -use crate::ExExEvent; +use crate::{ExExEvent, ExExNotification}; use metrics::Gauge; use reth_metrics::{metrics::Counter, Metrics}; use reth_primitives::{BlockNumber, FinishedExExHeight}; -use reth_provider::CanonStateNotification; use reth_tracing::tracing::debug; use std::{ collections::VecDeque, @@ -24,7 +23,7 @@ use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; #[derive(Metrics)] #[metrics(scope = "exex")] struct ExExMetrics { - /// The total number of canonical state notifications sent to an ExEx. + /// The total number of notifications sent to an ExEx. notifications_sent_total: Counter, /// The total number of events an ExEx has sent to the manager. events_sent_total: Counter, @@ -42,8 +41,8 @@ pub struct ExExHandle { /// Metrics for an ExEx. metrics: ExExMetrics, - /// Channel to send [`CanonStateNotification`]s to the ExEx. - sender: PollSender, + /// Channel to send [`ExExNotification`]s to the ExEx. + sender: PollSender, /// Channel to receive [`ExExEvent`]s from the ExEx. receiver: UnboundedReceiver, /// The ID of the next notification to send to this ExEx. @@ -59,22 +58,22 @@ impl ExExHandle { /// Create a new handle for the given ExEx. 
/// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a - /// [`Receiver`] for [`CanonStateNotification`]s that should be given to the ExEx. - pub fn new(id: String) -> (Self, UnboundedSender, Receiver) { - let (canon_tx, canon_rx) = mpsc::channel(1); + /// [`Receiver`] for [`ExExNotification`]s that should be given to the ExEx. + pub fn new(id: String) -> (Self, UnboundedSender, Receiver) { + let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); ( Self { id: id.clone(), metrics: ExExMetrics::new_with_labels(&[("exex", id)]), - sender: PollSender::new(canon_tx), + sender: PollSender::new(notification_tx), receiver: event_rx, next_notification_id: 0, finished_height: None, }, event_tx, - canon_rx, + notification_rx, ) } @@ -85,14 +84,20 @@ impl ExExHandle { fn send( &mut self, cx: &mut Context<'_>, - (event_id, notification): &(usize, CanonStateNotification), - ) -> Poll>> { + (event_id, notification): &(usize, ExExNotification), + ) -> Poll>> { // check that this notification is above the finished height of the exex if the exex has set // one if let Some(finished_height) = self.finished_height { - if finished_height >= notification.tip().number { - self.next_notification_id = event_id + 1; - return Poll::Ready(Ok(())) + match notification { + ExExNotification::ChainCommitted { new } | + ExExNotification::ChainReorged { old: _, new } + if finished_height >= new.tip().number => + { + self.next_notification_id = event_id + 1; + return Poll::Ready(Ok(())) + } + _ => (), } } @@ -142,18 +147,18 @@ pub struct ExExManager { /// Handles to communicate with the ExEx's. exex_handles: Vec, - /// [`CanonStateNotification`] channel from the [`ExExManagerHandle`]s. - handle_rx: UnboundedReceiver, + /// [`ExExNotification`] channel from the [`ExExManagerHandle`]s. + handle_rx: UnboundedReceiver, /// The minimum notification ID currently present in the buffer. 
min_id: usize, - /// Monotonically increasing ID for [`CanonStateNotification`]s. + /// Monotonically increasing ID for [`ExExNotification`]s. next_id: usize, - /// Internal buffer of [`CanonStateNotification`]s. + /// Internal buffer of [`ExExNotification`]s. /// /// The first element of the tuple is a monotonically increasing ID unique to the notification /// (the second element of the tuple). - buffer: VecDeque<(usize, CanonStateNotification)>, + buffer: VecDeque<(usize, ExExNotification)>, /// Max size of the internal state notifications buffer. max_capacity: usize, /// Current state notifications buffer capacity. @@ -244,7 +249,7 @@ impl ExExManager { /// Pushes a new notification into the managers internal buffer, assigning the notification a /// unique ID. - fn push_notification(&mut self, notification: CanonStateNotification) { + fn push_notification(&mut self, notification: ExExNotification) { let next_id = self.next_id; self.buffer.push_back((next_id, notification)); self.next_id += 1; @@ -334,7 +339,7 @@ impl Future for ExExManager { #[derive(Debug)] pub struct ExExManagerHandle { /// Channel to send notifications to the ExEx manager. - exex_tx: UnboundedSender, + exex_tx: UnboundedSender, /// The number of ExEx's running on the node. num_exexs: usize, /// A watch channel denoting whether the manager is ready for new notifications or not. @@ -376,10 +381,7 @@ impl ExExManagerHandle { /// Synchronously send a notification over the channel to all execution extensions. /// /// Senders should call [`Self::has_capacity`] first. - pub fn send( - &self, - notification: CanonStateNotification, - ) -> Result<(), SendError> { + pub fn send(&self, notification: ExExNotification) -> Result<(), SendError> { self.exex_tx.send(notification) } @@ -389,8 +391,8 @@ impl ExExManagerHandle { /// capacity in the channel, the future will wait. 
pub async fn send_async( &mut self, - notification: CanonStateNotification, - ) -> Result<(), SendError> { + notification: ExExNotification, + ) -> Result<(), SendError> { self.ready().await; self.exex_tx.send(notification) } diff --git a/crates/exex/src/notification.rs b/crates/exex/src/notification.rs new file mode 100644 index 000000000..ae8091e0c --- /dev/null +++ b/crates/exex/src/notification.rs @@ -0,0 +1,54 @@ +use std::sync::Arc; + +use reth_provider::{CanonStateNotification, Chain}; + +/// Notifications sent to an ExEx. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ExExNotification { + /// Chain got committed without a reorg, and only the new chain is returned. + ChainCommitted { + /// The new chain after commit. + new: Arc, + }, + /// Chain got reorged, and both the old and the new chains are returned. + ChainReorged { + /// The old chain before reorg. + old: Arc, + /// The new chain after reorg. + new: Arc, + }, + /// Chain got reverted, and only the old chain is returned. + ChainReverted { + /// The old chain before reversion. + old: Arc, + }, +} + +impl ExExNotification { + /// Returns the committed chain from the [Self::ChainCommitted] and [Self::ChainReorged] + /// variants, if any. + pub fn committed_chain(&self) -> Option> { + match self { + Self::ChainCommitted { new } | Self::ChainReorged { old: _, new } => Some(new.clone()), + Self::ChainReverted { .. } => None, + } + } + + /// Returns the reverted chain from the [Self::ChainReorged] and [Self::ChainReverted] variants, + /// if any. + pub fn reverted_chain(&self) -> Option> { + match self { + Self::ChainReorged { old, new: _ } | Self::ChainReverted { old } => Some(old.clone()), + Self::ChainCommitted { .. 
} => None, + } + } +} + +impl From for ExExNotification { + fn from(notification: CanonStateNotification) -> Self { + match notification { + CanonStateNotification::Commit { new } => Self::ChainCommitted { new }, + CanonStateNotification::Reorg { old, new } => Self::ChainReorged { old, new }, + } + } +} diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 49be32b33..c47478047 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -661,7 +661,7 @@ where executor.spawn_critical("exex manager blockchain tree notifications", async move { while let Ok(notification) = canon_state_notifications.recv().await { handle - .send_async(notification) + .send_async(notification.into()) .await .expect("blockchain tree notification could not be sent to exex manager"); } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 7f22ecaef..b581af403 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -3,7 +3,7 @@ use num_traits::Zero; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; -use reth_exex::ExExManagerHandle; +use reth_exex::{ExExManagerHandle, ExExNotification}; use reth_primitives::{ stage::{ CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, StageCheckpoint, StageId, @@ -12,9 +12,8 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - BlockReader, CanonStateNotification, Chain, DatabaseProviderRW, ExecutorFactory, - HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, - TransactionVariant, + BlockReader, Chain, DatabaseProviderRW, ExecutorFactory, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, TransactionVariant, }; use reth_stages_api::{ BlockErrorKind, ExecInput, ExecOutput, MetricEvent, 
MetricEventsSender, Stage, StageError, @@ -265,7 +264,7 @@ impl ExecutionStage { // NOTE: We can ignore the error here, since an error means that the channel is closed, // which means the manager has died, which then in turn means the node is shutting down. - let _ = self.exex_manager_handle.send(CanonStateNotification::Commit { new: chain }); + let _ = self.exex_manager_handle.send(ExExNotification::ChainCommitted { new: chain }); } let time = Instant::now(); @@ -436,7 +435,7 @@ impl Stage for ExecutionStage { // NOTE: We can ignore the error here, since an error means that the channel is closed, // which means the manager has died, which then in turn means the node is shutting down. - let _ = self.exex_manager_handle.send(CanonStateNotification::Reorg { + let _ = self.exex_manager_handle.send(ExExNotification::ChainReorged { old: Arc::new(chain), new: Arc::new(Chain::default()), }); diff --git a/examples/exex/minimal/Cargo.toml b/examples/exex/minimal/Cargo.toml index c1c586fd5..fc6eba841 100644 --- a/examples/exex/minimal/Cargo.toml +++ b/examples/exex/minimal/Cargo.toml @@ -12,7 +12,7 @@ reth-node-api.workspace = true reth-node-core.workspace = true reth-node-ethereum.workspace = true reth-primitives.workspace = true -reth-provider.workspace = true +reth-tracing.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/exex/minimal/src/main.rs b/examples/exex/minimal/src/main.rs index 1c2463cda..18d3acd2c 100644 --- a/examples/exex/minimal/src/main.rs +++ b/examples/exex/minimal/src/main.rs @@ -1,8 +1,8 @@ use futures::Future; -use reth_exex::{ExExContext, ExExEvent}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; -use reth_provider::CanonStateNotification; +use reth_tracing::tracing::info; /// The initialization logic of the ExEx is just an async function. 
/// @@ -21,19 +21,20 @@ async fn exex_init( async fn exex(mut ctx: ExExContext) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.recv().await { match ¬ification { - CanonStateNotification::Commit { new } => { - println!("Received commit: {:?}", new.first().number..=new.tip().number); + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); } - CanonStateNotification::Reorg { old, new } => { - println!( - "Received reorg: {:?} -> {:?}", - old.first().number..=old.tip().number, - new.first().number..=new.tip().number - ); + ExExNotification::ChainReorged { old, new } => { + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + info!(reverted_chain = ?old.range(), "Received revert"); } }; - ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + if let Some(committed_chain) = notification.committed_chain() { + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } } Ok(()) } diff --git a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs index 92e6ef106..0f48b0a5f 100644 --- a/examples/exex/op-bridge/src/main.rs +++ b/examples/exex/op-bridge/src/main.rs @@ -94,7 +94,8 @@ async fn op_bridge_exex( ) -> eyre::Result<()> { // Process all new chain state notifications while let Some(notification) = ctx.notifications.recv().await { - if let Some(reverted_chain) = notification.reverted() { + // Revert all deposits and withdrawals + if let Some(reverted_chain) = notification.reverted_chain() { let events = decode_chain_into_events(&reverted_chain); let mut deposits = 0; @@ -126,22 +127,22 @@ async fn op_bridge_exex( } // Insert all new deposits and withdrawals - let committed_chain = notification.committed(); - let events = decode_chain_into_events(&committed_chain); - - let mut deposits = 0; - let mut withdrawals = 0; - - for (block, tx, log, event) in events { - 
match event { - // L1 -> L2 deposit - L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { - amount, - from, - to, - .. - }) => { - let inserted = connection.execute( + if let Some(committed_chain) = notification.committed_chain() { + let events = decode_chain_into_events(&committed_chain); + + let mut deposits = 0; + let mut withdrawals = 0; + + for (block, tx, log, event) in events { + match event { + // L1 -> L2 deposit + L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { + amount, + from, + to, + .. + }) => { + let inserted = connection.execute( r#" INSERT INTO deposits (block_number, tx_hash, contract_address, "from", "to", amount) VALUES (?, ?, ?, ?, ?, ?) @@ -155,16 +156,16 @@ async fn op_bridge_exex( amount.to_string(), ), )?; - deposits += inserted; - } - // L2 -> L1 withdrawal - L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { - amount, - from, - to, - .. - }) => { - let inserted = connection.execute( + deposits += inserted; + } + // L2 -> L1 withdrawal + L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { + amount, + from, + to, + .. + }) => { + let inserted = connection.execute( r#" INSERT INTO withdrawals (block_number, tx_hash, contract_address, "from", "to", amount) VALUES (?, ?, ?, ?, ?, ?) 
@@ -178,17 +179,18 @@ async fn op_bridge_exex( amount.to_string(), ), )?; - withdrawals += inserted; - } - _ => continue, - }; - } + withdrawals += inserted; + } + _ => continue, + }; + } - info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); + info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); - // Send a finished height event, signaling the node that we don't need any blocks below - // this height anymore - ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + // Send a finished height event, signaling the node that we don't need any blocks below + // this height anymore + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } } Ok(()) From d1e38966a106d37beacb329cde19b63d09635abf Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 23 Apr 2024 11:36:00 +0100 Subject: [PATCH 005/250] fix(stages): send revert chain notification instead of a reorg on unwind (#7808) --- crates/stages/src/stages/execution.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index b581af403..6fb6f58e7 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -248,7 +248,7 @@ impl ExecutionStage { let state = executor.take_output_state(); let write_preparation_duration = time.elapsed(); - // Check if we should send a [`CanonStateNotification`] to execution extensions. + // Check if we should send a [`ExExNotification`] to execution extensions. // // Note: Since we only write to `blocks` if there are any ExEx's we don't need to perform // the `has_exexs` check here as well @@ -427,18 +427,17 @@ impl Stage for ExecutionStage { // This also updates `PlainStorageState` and `PlainAccountState`. 
let bundle_state_with_receipts = provider.unwind_or_peek_state::(range.clone())?; - // Construct a `CanonStateNotification` if we have ExEx's installed. + // Construct a `ExExNotification` if we have ExEx's installed. if self.exex_manager_handle.has_exexs() { - // Get the blocks for the unwound range. This is needed for `CanonStateNotification`. + // Get the blocks for the unwound range. This is needed for `ExExNotification`. let blocks = provider.get_take_block_range::(range.clone())?; let chain = Chain::new(blocks, bundle_state_with_receipts, None); // NOTE: We can ignore the error here, since an error means that the channel is closed, // which means the manager has died, which then in turn means the node is shutting down. - let _ = self.exex_manager_handle.send(ExExNotification::ChainReorged { - old: Arc::new(chain), - new: Arc::new(Chain::default()), - }); + let _ = self + .exex_manager_handle + .send(ExExNotification::ChainReverted { old: Arc::new(chain) }); } // Unwind all receipts for transactions in the block range From 3224837523dddadf86f4d2f27869e5b0b4fda789 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 23 Apr 2024 19:18:32 +0800 Subject: [PATCH 006/250] fix(args/txpool): duplicate arg of txpool.max-account-slots (#7806) Signed-off-by: jsvisa --- book/cli/reth/node.md | 6 +++--- crates/node-core/src/args/txpool_args.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index dbfe7b1d4..450180c84 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -349,7 +349,7 @@ TxPool: [default: 20] - --txpool.max_account_slots + --txpool.max-account-slots Max number of executable transaction slots guaranteed per account [default: 16] @@ -387,7 +387,7 @@ Builder: --builder.extradata Block extra data set by the payload builder - [default: reth/v0.2.0-beta.5/linux] + [default: reth//] --builder.gaslimit Target gas ceiling for built blocks @@ -517,7 +517,7 @@ Logging: --log.file.directory The 
path to put log files in - [default: /root/.cache/reth/logs] + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file diff --git a/crates/node-core/src/args/txpool_args.rs b/crates/node-core/src/args/txpool_args.rs index db9e43d82..12fc6bd79 100644 --- a/crates/node-core/src/args/txpool_args.rs +++ b/crates/node-core/src/args/txpool_args.rs @@ -35,7 +35,7 @@ pub struct TxPoolArgs { pub queued_max_size: usize, /// Max number of executable transaction slots guaranteed per account - #[arg(long = "txpool.max-account-slots", long = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] + #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] pub max_account_slots: usize, /// Price bump (in %) for the transaction pool underpriced check. From 6d2e20cd8570ed21852db1bce6f3d123dd948283 Mon Sep 17 00:00:00 2001 From: ThreeHrSleep <151536303+ThreeHrSleep@users.noreply.github.com> Date: Tue, 23 Apr 2024 17:20:19 +0530 Subject: [PATCH 007/250] chore: Move consensus trait to standalone crate (#7757) Co-authored-by: Matthias Seitz --- Cargo.lock | 27 +++++++++++++++-- Cargo.toml | 4 ++- bin/reth/Cargo.toml | 1 + .../src/commands/debug_cmd/build_block.rs | 3 +- bin/reth/src/commands/debug_cmd/execution.rs | 6 ++-- bin/reth/src/commands/debug_cmd/merkle.rs | 3 +- .../src/commands/debug_cmd/replay_engine.rs | 2 +- bin/reth/src/commands/import.rs | 10 +++---- bin/reth/src/commands/stage/unwind.rs | 2 +- crates/blockchain-tree/Cargo.toml | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 2 +- crates/blockchain-tree/src/externals.rs | 3 +- crates/consensus/auto-seal/Cargo.toml | 2 ++ crates/consensus/auto-seal/src/lib.rs | 6 ++-- crates/consensus/auto-seal/src/task.rs | 2 +- crates/consensus/beacon-core/Cargo.toml | 2 +- crates/consensus/beacon-core/src/lib.rs | 2 +- crates/consensus/beacon/Cargo.toml | 1 + 
crates/consensus/beacon/src/engine/event.rs | 2 +- crates/consensus/beacon/src/engine/message.rs | 6 ++-- crates/consensus/beacon/src/engine/mod.rs | 4 +-- .../consensus/beacon/src/engine/test_utils.rs | 9 +++--- crates/consensus/common/Cargo.toml | 1 + crates/consensus/common/src/validation.rs | 3 +- crates/consensus/consensus/Cargo.toml | 18 +++++++++++ .../consensus/src/lib.rs} | 13 ++++++-- crates/interfaces/Cargo.toml | 2 +- .../interfaces/src/blockchain_tree/error.rs | 2 +- crates/interfaces/src/error.rs | 2 +- crates/interfaces/src/lib.rs | 3 -- crates/interfaces/src/p2p/error.rs | 3 +- crates/interfaces/src/p2p/full_block.rs | 12 ++++---- .../interfaces/src/p2p/headers/downloader.rs | 7 ++--- crates/interfaces/src/p2p/headers/error.rs | 2 +- crates/interfaces/src/p2p/headers/mod.rs | 2 +- crates/interfaces/src/p2p/mod.rs | 2 +- crates/interfaces/src/test_utils/headers.rs | 30 +++++++++---------- crates/net/downloaders/Cargo.toml | 1 + crates/net/downloaders/src/bodies/bodies.rs | 16 +++++----- crates/net/downloaders/src/bodies/queue.rs | 10 +++---- crates/net/downloaders/src/bodies/request.rs | 12 ++++---- crates/net/downloaders/src/bodies/task.rs | 3 +- .../src/headers/reverse_headers.rs | 18 +++++------ crates/net/downloaders/src/headers/task.rs | 2 +- crates/net/network/Cargo.toml | 1 + crates/net/network/src/import.rs | 2 +- crates/node-builder/Cargo.toml | 1 + crates/node-builder/src/builder.rs | 3 +- crates/node-builder/src/setup.rs | 10 +++---- crates/node/events/Cargo.toml | 2 +- crates/node/events/src/node.rs | 2 +- crates/rpc/rpc-engine-api/Cargo.toml | 1 - crates/rpc/rpc-engine-api/src/engine_api.rs | 5 ++-- crates/stages-api/Cargo.toml | 1 + crates/stages-api/src/error.rs | 7 +++-- crates/stages-api/src/pipeline/mod.rs | 6 ++-- crates/stages/Cargo.toml | 1 + crates/stages/src/lib.rs | 2 +- crates/stages/src/sets.rs | 6 ++-- crates/stages/src/stages/headers.rs | 2 +- crates/stages/src/stages/merkle.rs | 4 +-- 
crates/stages/src/stages/sender_recovery.rs | 4 +-- crates/storage/provider/Cargo.toml | 1 + crates/storage/provider/src/providers/mod.rs | 2 +- .../storage/provider/src/traits/chain_info.rs | 2 +- 66 files changed, 185 insertions(+), 146 deletions(-) create mode 100644 crates/consensus/consensus/Cargo.toml rename crates/{interfaces/src/consensus.rs => consensus/consensus/src/lib.rs} (95%) diff --git a/Cargo.lock b/Cargo.lock index d7effc49c..6802524cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6064,6 +6064,7 @@ dependencies = [ "reth-blockchain-tree", "reth-cli-runner", "reth-config", + "reth-consensus", "reth-consensus-common", "reth-db", "reth-discv4", @@ -6114,12 +6115,14 @@ version = "0.2.0-beta.6" dependencies = [ "futures-util", "reth-beacon-consensus", + "reth-consensus", "reth-engine-primitives", "reth-evm", "reth-interfaces", "reth-primitives", "reth-provider", "reth-revm", + "reth-rpc-types", "reth-stages-api", "reth-transaction-pool", "tokio", @@ -6159,6 +6162,7 @@ dependencies = [ "reth-beacon-consensus-core", "reth-blockchain-tree", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", "reth-engine-primitives", @@ -6191,8 +6195,8 @@ dependencies = [ name = "reth-beacon-consensus-core" version = "0.2.0-beta.6" dependencies = [ + "reth-consensus", "reth-consensus-common", - "reth-interfaces", "reth-primitives", ] @@ -6206,6 +6210,7 @@ dependencies = [ "lru", "metrics", "parking_lot 0.12.1", + "reth-consensus", "reth-db", "reth-interfaces", "reth-metrics", @@ -6274,11 +6279,21 @@ dependencies = [ "toml", ] +[[package]] +name = "reth-consensus" +version = "0.2.0-beta.6" +dependencies = [ + "auto_impl", + "reth-primitives", + "thiserror", +] + [[package]] name = "reth-consensus-common" version = "0.2.0-beta.6" dependencies = [ "mockall", + "reth-consensus", "reth-interfaces", "reth-primitives", "reth-provider", @@ -6405,6 +6420,7 @@ dependencies = [ "rand 0.8.5", "rayon", "reth-config", + "reth-consensus", "reth-db", "reth-interfaces", 
"reth-metrics", @@ -6649,10 +6665,10 @@ dependencies = [ "futures", "parking_lot 0.12.1", "rand 0.8.5", + "reth-consensus", "reth-eth-wire-types", "reth-network-api", "reth-primitives", - "reth-rpc-types", "secp256k1 0.27.0", "thiserror", "tokio", @@ -6781,6 +6797,7 @@ dependencies = [ "pin-project", "pprof", "rand 0.8.5", + "reth-consensus", "reth-discv4", "reth-discv5", "reth-dns-discovery", @@ -6875,6 +6892,7 @@ dependencies = [ "reth-beacon-consensus", "reth-blockchain-tree", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", "reth-exex", @@ -6993,12 +7011,12 @@ dependencies = [ "pin-project", "reth-beacon-consensus", "reth-db", - "reth-interfaces", "reth-network", "reth-network-api", "reth-primitives", "reth-provider", "reth-prune", + "reth-rpc-types", "reth-stages", "reth-static-file", "tokio", @@ -7173,6 +7191,7 @@ dependencies = [ "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-rpc-types", "reth-trie", "revm", "strum 0.26.2", @@ -7422,6 +7441,7 @@ dependencies = [ "rayon", "reth-codecs", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", "reth-etl", @@ -7450,6 +7470,7 @@ dependencies = [ "auto_impl", "futures-util", "metrics", + "reth-consensus", "reth-db", "reth-interfaces", "reth-metrics", diff --git a/Cargo.toml b/Cargo.toml index 8cf53ef55..1fb403e1b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "crates/consensus/beacon/", "crates/consensus/beacon-core/", "crates/consensus/common/", + "crates/consensus/consensus/", "crates/ethereum-forks/", "crates/e2e-test-utils/", "crates/etl/", @@ -84,7 +85,7 @@ members = [ "examples/custom-inspector/", "examples/exex/minimal/", "examples/exex/op-bridge/", - "testing/ef-tests/", + "testing/ef-tests/" ] default-members = ["bin/reth"] @@ -207,6 +208,7 @@ reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-cli-runner = { path = "crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } reth-config = { path = "crates/config" } 
+reth-consensus = { path = "crates/consensus/consensus" } reth-consensus-common = { path = "crates/consensus/common" } reth-db = { path = "crates/storage/db" } reth-discv4 = { path = "crates/net/discv4" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ea1ee87f0..ff2515464 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -56,6 +56,7 @@ reth-node-optimism = { workspace = true, optional = true, features = [ reth-node-core.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true +reth-consensus.workspace = true # crypto alloy-rlp.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 566198ec8..7aaef00fa 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -18,8 +18,9 @@ use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_cli_runner::CliContext; +use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; -use reth_interfaces::{consensus::Consensus, RethResult}; +use reth_interfaces::RethResult; use reth_node_api::PayloadBuilderAttributes; #[cfg(not(feature = "optimism"))] use reth_node_ethereum::EthEvmConfig; diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 10f485a73..2384a9af0 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -14,16 +14,14 @@ use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::BeaconConsensus; use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::Consensus; use reth_db::{database::Database, init_db, DatabaseEnv}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use 
reth_interfaces::{ - consensus::Consensus, - p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, -}; +use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_core::init::init_genesis; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index ed8783e96..d806306d4 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -14,9 +14,10 @@ use clap::Parser; use reth_beacon_consensus::BeaconConsensus; use reth_cli_runner::CliContext; use reth_config::Config; +use reth_consensus::Consensus; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; use reth_exex::ExExManagerHandle; -use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient}; +use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_node_ethereum::EthEvmConfig; diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 0ef866396..841b9e3c6 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -15,8 +15,8 @@ use reth_blockchain_tree::{ }; use reth_cli_runner::CliContext; use reth_config::Config; +use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; -use reth_interfaces::consensus::Consensus; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_node_core::engine_api_store::{EngineApiStore, StoredEngineApiMessage}; diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index dc3140924..4542f10be 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -13,6 +13,7 @@ use eyre::Context; use futures::{Stream, StreamExt}; use 
reth_beacon_consensus::BeaconConsensus; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::Consensus; use reth_db::{database::Database, init_db}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -20,12 +21,9 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::downloader::BodyDownloader, - headers::downloader::{HeaderDownloader, SyncTarget}, - }, +use reth_interfaces::p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_node_core::init::init_genesis; use reth_node_ethereum::EthEvmConfig; diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 7810a4416..a1fe7d8a0 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -10,13 +10,13 @@ use crate::{ use clap::{Parser, Subcommand}; use reth_beacon_consensus::BeaconConsensus; use reth_config::{Config, PruneConfig}; +use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_interfaces::consensus::Consensus; use reth_node_core::{ args::{get_secret_key, NetworkArgs}, dirs::ChainPath, diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 3a6ab1439..1757b2939 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -19,6 +19,7 @@ reth-provider.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-parallel = { workspace = true, features = ["parallel"] } +reth-consensus.workspace = true # common parking_lot.workspace = true diff --git a/crates/blockchain-tree/src/blockchain_tree.rs 
b/crates/blockchain-tree/src/blockchain_tree.rs index 02bae76bb..799e7e343 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -5,13 +5,13 @@ use crate::{ state::{BlockChainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, BundleStateData, TreeExternals, }; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, }, - consensus::{Consensus, ConsensusError}, executor::{BlockExecutionError, BlockValidationError}, provider::RootMismatch, RethResult, diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 2444cf24a..c091b800a 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -5,13 +5,13 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, InsertBlockErrorKind}, BlockAttachment, BlockValidationKind, }, - consensus::{Consensus, ConsensusError}, RethResult, }; use reth_primitives::{ diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 5a288271e..c3bda1ae2 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -1,9 +1,10 @@ //! Blockchain tree externals. 
+use reth_consensus::Consensus; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; -use reth_interfaces::{consensus::Consensus, RethResult}; +use reth_interfaces::RethResult; use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; use reth_provider::{ProviderFactory, StatsReader}; use std::{collections::BTreeMap, sync::Arc}; diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 5fbf4f07a..ec96426a4 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -22,6 +22,8 @@ reth-revm.workspace = true reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true +reth-consensus.workspace = true +reth-rpc-types.workspace = true # async futures-util.workspace = true diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 62a293664..f6de63979 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -16,12 +16,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_beacon_consensus::BeaconEngineMessage; +use reth_consensus::{Consensus, ConsensusError}; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; -use reth_interfaces::{ - consensus::{Consensus, ConsensusError}, - executor::{BlockExecutionError, BlockValidationError}, -}; +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index e76b4333e..53bfc6356 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -3,9 +3,9 @@ use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, 
ForkchoiceStatus}; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; -use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders}; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; +use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; use std::{ diff --git a/crates/consensus/beacon-core/Cargo.toml b/crates/consensus/beacon-core/Cargo.toml index 232631f73..b5c778b05 100644 --- a/crates/consensus/beacon-core/Cargo.toml +++ b/crates/consensus/beacon-core/Cargo.toml @@ -14,7 +14,7 @@ workspace = true # reth reth-consensus-common.workspace = true reth-primitives.workspace = true -reth-interfaces.workspace = true +reth-consensus.workspace = true [features] optimism = ["reth-primitives/optimism"] \ No newline at end of file diff --git a/crates/consensus/beacon-core/src/lib.rs b/crates/consensus/beacon-core/src/lib.rs index 599e01009..c2a3df6e6 100644 --- a/crates/consensus/beacon-core/src/lib.rs +++ b/crates/consensus/beacon-core/src/lib.rs @@ -8,8 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use reth_consensus::{Consensus, ConsensusError}; use reth_consensus_common::validation; -use reth_interfaces::consensus::{Consensus, ConsensusError}; use reth_primitives::{ constants::MAXIMUM_EXTRA_DATA_SIZE, Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 439002ec5..0fed12597 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -57,6 +57,7 @@ reth-downloaders.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-config.workspace = true 
+reth-consensus.workspace = true assert_matches.workspace = true diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 168130de7..d5cbdee46 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,6 +1,6 @@ use crate::engine::forkchoice::ForkchoiceStatus; -use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::{SealedBlock, SealedHeader, B256}; +use reth_rpc_types::engine::ForkchoiceState; use std::{sync::Arc, time::Duration}; /// Events emitted by [crate::BeaconConsensusEngine]. diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 464dcedb2..f9f1a84d4 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -4,11 +4,11 @@ use crate::{ }; use futures::{future::Either, FutureExt}; use reth_engine_primitives::EngineTypes; -use reth_interfaces::{consensus::ForkchoiceState, RethResult}; +use reth_interfaces::RethResult; use reth_payload_builder::error::PayloadBuilderError; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceUpdateError, - ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, + CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, + ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use std::{ future::Future, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 877e6f450..5e22a4860 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -14,7 +14,6 @@ use reth_interfaces::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }, - consensus::ForkchoiceState, 
executor::BlockValidationError, p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, provider::ProviderResult, @@ -31,7 +30,8 @@ use reth_provider::{ StageCheckpointReader, }; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, PayloadStatus, PayloadStatusEnum, PayloadValidationError, + CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, }; use reth_stages_api::{ControlFlow, Pipeline}; use reth_tasks::TaskSpawner; diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 329ea6446..297269975 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -7,16 +7,15 @@ use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; use reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; -use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_evm_ethereum::EthEvmConfig; -type DatabaseEnv = TempDatabase; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::{ - consensus::Consensus, executor::BlockExecutionError, p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, sync::NoopSyncStateUpdater, @@ -40,6 +39,8 @@ use reth_tasks::TokioTaskExecutor; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::{oneshot, watch}; +type DatabaseEnv = TempDatabase; + type TestBeaconConsensusEngine = BeaconConsensusEngine< Arc, BlockchainProvider< diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 4659dd3e1..5e5a6ef57 100644 --- 
a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true +reth-consensus.workspace=true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 1ab466c77..949061882 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,6 +1,7 @@ //! Collection of methods for block validation. -use reth_interfaces::{consensus::ConsensusError, RethResult}; +use reth_consensus::ConsensusError; +use reth_interfaces::RethResult; use reth_primitives::{ constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, BlockNumber, ChainSpec, GotExpected, Hardfork, Header, InvalidTransactionError, SealedBlock, diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml new file mode 100644 index 000000000..6e4fc7ee9 --- /dev/null +++ b/crates/consensus/consensus/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "reth-consensus" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true + +# misc +auto_impl.workspace = true +thiserror.workspace = true \ No newline at end of file diff --git a/crates/interfaces/src/consensus.rs b/crates/consensus/consensus/src/lib.rs similarity index 95% rename from crates/interfaces/src/consensus.rs rename to crates/consensus/consensus/src/lib.rs index b7d03b72e..ab2453b74 100644 --- a/crates/interfaces/src/consensus.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -1,12 +1,19 @@ +//! 
Consensus protocol functions + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + use reth_primitives::{ BlockHash, BlockNumber, GotExpected, GotExpectedBoxed, Header, HeaderValidationError, InvalidTransactionError, SealedBlock, SealedHeader, B256, U256, }; use std::fmt::Debug; -/// Re-export fork choice state -pub use reth_rpc_types::engine::ForkchoiceState; - /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] pub trait Consensus: Debug + Send + Sync { diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 8f4aa494a..70ac2f942 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -12,9 +12,9 @@ workspace = true [dependencies] reth-primitives.workspace = true -reth-rpc-types.workspace = true reth-network-api.workspace = true reth-eth-wire-types.workspace = true +reth-consensus.workspace = true # async futures.workspace = true diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index e08211a4f..34e018835 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -1,11 +1,11 @@ //! Error handling for the blockchain tree use crate::{ - consensus::ConsensusError, executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, RethError, }; +use reth_consensus::ConsensusError; use reth_primitives::{BlockHash, BlockNumber, SealedBlock}; /// Various error cases that can occur when a block violates tree assumptions. 
diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index c49323595..df307ae09 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -1,10 +1,10 @@ use crate::{ blockchain_tree::error::{BlockchainTreeError, CanonicalError}, - consensus::ConsensusError, db::DatabaseError, executor::BlockExecutionError, provider::ProviderError, }; +use reth_consensus::ConsensusError; use reth_network_api::NetworkError; use reth_primitives::fs::FsPathError; diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index b8cfb7b39..e60d4a621 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -12,9 +12,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// Consensus traits. -pub mod consensus; - /// Database error pub mod db; diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 3c4e351fc..6d822f44c 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -1,5 +1,6 @@ use super::headers::client::HeadersRequest; -use crate::{consensus::ConsensusError, db::DatabaseError, provider::ProviderError}; +use crate::{db::DatabaseError, provider::ProviderError}; +use reth_consensus::ConsensusError; use reth_network_api::ReputationChangeKind; use reth_primitives::{ BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, WithPeerId, B256, diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index 3ab8e7644..cb4c66543 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -1,13 +1,11 @@ use super::headers::client::HeadersRequest; -use crate::{ - consensus::{Consensus, ConsensusError}, - p2p::{ - bodies::client::{BodiesClient, SingleBodyRequest}, - error::PeerRequestResult, - headers::client::{HeadersClient, SingleHeaderRequest}, - }, +use crate::p2p::{ + 
bodies::client::{BodiesClient, SingleBodyRequest}, + error::PeerRequestResult, + headers::client::{HeadersClient, SingleHeaderRequest}, }; use futures::Stream; +use reth_consensus::{Consensus, ConsensusError}; use reth_primitives::{ BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, WithPeerId, B256, }; diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/interfaces/src/p2p/headers/downloader.rs index 9eea13aab..500a1a1bc 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/interfaces/src/p2p/headers/downloader.rs @@ -1,11 +1,8 @@ use super::error::HeadersDownloaderResult; -use crate::{ - consensus::Consensus, - p2p::error::{DownloadError, DownloadResult}, -}; +use crate::p2p::error::{DownloadError, DownloadResult}; use futures::Stream; +use reth_consensus::Consensus; use reth_primitives::{BlockHashOrNumber, SealedHeader, B256}; - /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, diff --git a/crates/interfaces/src/p2p/headers/error.rs b/crates/interfaces/src/p2p/headers/error.rs index 12eab9548..f586aaf74 100644 --- a/crates/interfaces/src/p2p/headers/error.rs +++ b/crates/interfaces/src/p2p/headers/error.rs @@ -1,4 +1,4 @@ -use crate::consensus::ConsensusError; +use reth_consensus::ConsensusError; use reth_primitives::SealedHeader; use thiserror::Error; diff --git a/crates/interfaces/src/p2p/headers/mod.rs b/crates/interfaces/src/p2p/headers/mod.rs index 5746c1b2d..56aabf9d6 100644 --- a/crates/interfaces/src/p2p/headers/mod.rs +++ b/crates/interfaces/src/p2p/headers/mod.rs @@ -6,7 +6,7 @@ pub mod client; /// A downloader that receives and verifies block headers, is generic /// over the Consensus and the HeadersClient being used. 
/// -/// [`Consensus`]: crate::consensus::Consensus +/// [`Consensus`]: reth_consensus::Consensus /// [`HeadersClient`]: client::HeadersClient pub mod downloader; diff --git a/crates/interfaces/src/p2p/mod.rs b/crates/interfaces/src/p2p/mod.rs index 8e4d7c84f..75f3a8fc4 100644 --- a/crates/interfaces/src/p2p/mod.rs +++ b/crates/interfaces/src/p2p/mod.rs @@ -14,7 +14,7 @@ pub mod full_block; /// of a Linear and a Parallel downloader generic over the [`Consensus`] and /// [`HeadersClient`]. /// -/// [`Consensus`]: crate::consensus::Consensus +/// [`Consensus`]: reth_consensus::Consensus /// [`HeadersClient`]: crate::p2p::headers::client::HeadersClient pub mod headers; diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index 8262d9ae0..c0da9ff16 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -1,19 +1,17 @@ //! Testing support for headers related interfaces. -use crate::{ - consensus::{self, Consensus, ConsensusError}, - p2p::{ - download::DownloadClient, - error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, - headers::{ - client::{HeadersClient, HeadersRequest}, - downloader::{HeaderDownloader, SyncTarget}, - error::HeadersDownloaderResult, - }, - priority::Priority, +use crate::p2p::{ + download::DownloadClient, + error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, + headers::{ + client::{HeadersClient, HeadersRequest}, + downloader::{HeaderDownloader, SyncTarget}, + error::HeadersDownloaderResult, }, + priority::Priority, }; use futures::{Future, FutureExt, Stream, StreamExt}; +use reth_consensus::{Consensus, ConsensusError}; use reth_primitives::{ Header, HeadersDirection, PeerId, SealedBlock, SealedHeader, WithPeerId, U256, }; @@ -274,7 +272,7 @@ impl TestConsensus { impl Consensus for TestConsensus { fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { if self.fail_validation() { 
- Err(consensus::ConsensusError::BaseFeeMissing) + Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } @@ -286,7 +284,7 @@ impl Consensus for TestConsensus { _parent: &SealedHeader, ) -> Result<(), ConsensusError> { if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) + Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } @@ -298,15 +296,15 @@ impl Consensus for TestConsensus { _total_difficulty: U256, ) -> Result<(), ConsensusError> { if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) + Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_block(&self, _block: &SealedBlock) -> Result<(), consensus::ConsensusError> { + fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) + Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 7ae6db8e6..b5de192f2 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives.workspace = true reth-tasks.workspace = true reth-provider.workspace = true reth-config.workspace = true +reth-consensus.workspace = true # async futures.workspace = true diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index d45c9b191..28b43359e 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -3,16 +3,14 @@ use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics}; use futures::Stream; use futures_util::StreamExt; use reth_config::BodiesConfig; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::{ - client::BodiesClient, - downloader::{BodyDownloader, BodyDownloaderResult}, - response::BlockResponse, - }, - error::{DownloadError, DownloadResult}, +use reth_consensus::Consensus; +use 
reth_interfaces::p2p::{ + bodies::{ + client::BodiesClient, + downloader::{BodyDownloader, BodyDownloaderResult}, + response::BlockResponse, }, + error::{DownloadError, DownloadResult}, }; use reth_primitives::{BlockNumber, SealedHeader}; use reth_provider::HeaderProvider; diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 0fc9635df..072e059a4 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -2,12 +2,10 @@ use super::request::BodiesRequestFuture; use crate::metrics::BodyDownloaderMetrics; use futures::{stream::FuturesUnordered, Stream}; use futures_util::StreamExt; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::{client::BodiesClient, response::BlockResponse}, - error::DownloadResult, - }, +use reth_consensus::Consensus; +use reth_interfaces::p2p::{ + bodies::{client::BodiesClient, response::BlockResponse}, + error::DownloadResult, }; use reth_primitives::{BlockNumber, SealedHeader}; use std::{ diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 302256ef4..1a87928d5 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -1,12 +1,10 @@ use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics}; use futures::{Future, FutureExt}; -use reth_interfaces::{ - consensus::{Consensus as ConsensusTrait, Consensus}, - p2p::{ - bodies::{client::BodiesClient, response::BlockResponse}, - error::{DownloadError, DownloadResult}, - priority::Priority, - }, +use reth_consensus::Consensus; +use reth_interfaces::p2p::{ + bodies::{client::BodiesClient, response::BlockResponse}, + error::{DownloadError, DownloadResult}, + priority::Priority, }; use reth_primitives::{ BlockBody, GotExpected, PeerId, SealedBlock, SealedHeader, WithPeerId, B256, diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs 
index a57e5e486..2d9bb3f96 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -42,8 +42,9 @@ impl TaskDownloader { /// # Example /// /// ``` + /// use reth_consensus::Consensus; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; - /// use reth_interfaces::{consensus::Consensus, p2p::bodies::client::BodiesClient}; + /// use reth_interfaces::p2p::bodies::client::BodiesClient; /// use reth_provider::HeaderProvider; /// use std::sync::Arc; /// diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 3af45c172..8d2318507 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -6,17 +6,15 @@ use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; use reth_config::config::HeadersConfig; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - error::{DownloadError, DownloadResult, PeerRequestResult}, - headers::{ - client::{HeadersClient, HeadersRequest}, - downloader::{validate_header_download, HeaderDownloader, SyncTarget}, - error::{HeadersDownloaderError, HeadersDownloaderResult}, - }, - priority::Priority, +use reth_consensus::Consensus; +use reth_interfaces::p2p::{ + error::{DownloadError, DownloadResult, PeerRequestResult}, + headers::{ + client::{HeadersClient, HeadersRequest}, + downloader::{validate_header_download, HeaderDownloader, SyncTarget}, + error::{HeadersDownloaderError, HeadersDownloaderResult}, }, + priority::Priority, }; use reth_primitives::{ BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, PeerId, SealedHeader, diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index 83f00d4f1..16597342b 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ 
b/crates/net/downloaders/src/headers/task.rs @@ -44,7 +44,7 @@ impl TaskDownloader { /// # use std::sync::Arc; /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader; /// # use reth_downloaders::headers::task::TaskDownloader; - /// # use reth_interfaces::consensus::Consensus; + /// # use reth_consensus::Consensus; /// # use reth_interfaces::p2p::headers::client::HeadersClient; /// # fn t(consensus:Arc, client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 8e4c110bb..dbf7f5fa2 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -27,6 +27,7 @@ reth-transaction-pool.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true reth-tokio-util.workspace = true +reth-consensus.workspace = true # ethereum enr = { workspace = true, features = ["serde", "rust-secp256k1"] } diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index d127dab8f..738851f0a 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -50,7 +50,7 @@ pub enum BlockValidation { pub enum BlockImportError { /// Consensus error #[error(transparent)] - Consensus(#[from] reth_interfaces::consensus::ConsensusError), + Consensus(#[from] reth_consensus::ConsensusError), } /// An implementation of `BlockImport` used in Proof-of-Stake consensus that does nothing. 
diff --git a/crates/node-builder/Cargo.toml b/crates/node-builder/Cargo.toml index c245203ca..aae73f5a6 100644 --- a/crates/node-builder/Cargo.toml +++ b/crates/node-builder/Cargo.toml @@ -37,6 +37,7 @@ reth-stages.workspace = true reth-config.workspace = true reth-downloaders.workspace = true reth-node-events.workspace = true +reth-consensus.workspace = true ## async futures.workspace = true diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index c47478047..3d7d3a044 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -22,6 +22,7 @@ use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::{ database::Database, database_metrics::{DatabaseMetadata, DatabaseMetrics}, @@ -29,7 +30,7 @@ use reth_db::{ DatabaseEnv, }; use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; -use reth_interfaces::{consensus::Consensus, p2p::either::EitherDownloader}; +use reth_interfaces::p2p::either::EitherDownloader; use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; use reth_node_api::{ FullNodeComponents, FullNodeComponentsAdapter, FullNodeTypes, FullNodeTypesAdapter, NodeTypes, diff --git a/crates/node-builder/src/setup.rs b/crates/node-builder/src/setup.rs index bb67cad66..03bf45893 100644 --- a/crates/node-builder/src/setup.rs +++ b/crates/node-builder/src/setup.rs @@ -2,18 +2,16 @@ use crate::ConfigureEvm; use reth_config::{config::StageConfig, PruneConfig}; +use reth_consensus::Consensus; use reth_db::database::Database; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::{client::BodiesClient, downloader::BodyDownloader}, - 
headers::{client::HeadersClient, downloader::HeaderDownloader}, - }, +use reth_interfaces::p2p::{ + bodies::{client::BodiesClient, downloader::BodyDownloader}, + headers::{client::HeadersClient, downloader::HeaderDownloader}, }; use reth_node_core::{ node_config::NodeConfig, diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 9c66f1469..83f2bd13a 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -18,9 +18,9 @@ reth-network-api.workspace = true reth-stages.workspace = true reth-prune.workspace = true reth-static-file.workspace = true -reth-interfaces.workspace = true reth-db.workspace = true reth-primitives.workspace = true +reth-rpc-types.workspace = true # async tokio.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index b18cc5f0b..32bf66e1b 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -6,7 +6,6 @@ use reth_beacon_consensus::{ BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, }; use reth_db::{database::Database, database_metrics::DatabaseMetadata}; -use reth_interfaces::consensus::ForkchoiceState; use reth_network::{NetworkEvent, NetworkHandle}; use reth_network_api::PeersInfo; use reth_primitives::{ @@ -15,6 +14,7 @@ use reth_primitives::{ BlockNumber, B256, }; use reth_prune::PrunerEvent; +use reth_rpc_types::engine::ForkchoiceState; use reth_stages::{ExecOutput, PipelineEvent}; use reth_static_file::StaticFileProducerEvent; use std::{ diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 271363963..b7e6eeccb 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-interfaces.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true reth-rpc-api.workspace = true diff --git 
a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 7fc52b21c..01db0bb11 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -6,15 +6,14 @@ use reth_engine_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_interfaces::consensus::ForkchoiceState; use reth_payload_builder::PayloadStore; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hardfork, B256, U64}; use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceUpdated, PayloadId, PayloadStatus, - TransitionConfiguration, CAPABILITIES, + ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, + PayloadStatus, TransitionConfiguration, CAPABILITIES, }; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index d991a47af..d1e31ba78 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -18,6 +18,7 @@ reth-db.workspace = true reth-interfaces.workspace = true reth-static-file.workspace = true reth-tokio-util.workspace = true +reth-consensus.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index 3b744e7cb..3d7ae1d72 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -1,5 +1,6 @@ +use reth_consensus::ConsensusError; use reth_interfaces::{ - consensus, db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, + db::DatabaseError as 
DbError, executor, p2p::error::DownloadError, RethError, }; use reth_primitives::{BlockNumber, SealedHeader, StaticFileSegment, TxNumber}; use reth_provider::ProviderError; @@ -13,7 +14,7 @@ use tokio::sync::mpsc::error::SendError; pub enum BlockErrorKind { /// The block encountered a validation error. #[error("validation error: {0}")] - Validation(#[from] consensus::ConsensusError), + Validation(#[from] ConsensusError), /// The block encountered an execution error. #[error("execution error: {0}")] Execution(#[from] executor::BlockExecutionError), @@ -49,7 +50,7 @@ pub enum StageError { header: Box, /// The error that occurred when attempting to attach the header. #[source] - error: Box, + error: Box, }, /// The headers stage is missing sync gap. #[error("missing sync gap")] diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index bb1512958..1b455a939 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -552,8 +552,8 @@ mod tests { use super::*; use crate::{test_utils::TestStage, UnwindOutput}; use assert_matches::assert_matches; + use reth_consensus::ConsensusError; use reth_interfaces::{ - consensus, provider::ProviderError, test_utils::{generators, generators::random_header}, }; @@ -922,9 +922,7 @@ mod tests { 5, Default::default(), )), - error: BlockErrorKind::Validation( - consensus::ConsensusError::BaseFeeMissing, - ), + error: BlockErrorKind::Validation(ConsensusError::BaseFeeMissing), })) .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(0) })) .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })), diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index df98d1dd7..3c4a3d5a1 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -23,6 +23,7 @@ reth-trie = { workspace = true, features = ["metrics"] } reth-etl.workspace = true reth-config.workspace = true reth-stages-api = { workspace = true, features = 
["test-utils"] } +reth-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index f8e427763..cf2b8acbe 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -15,7 +15,6 @@ //! # use std::sync::Arc; //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; -//! # use reth_interfaces::consensus::Consensus; //! # use reth_interfaces::test_utils::{TestBodiesClient, TestConsensus, TestHeadersClient}; //! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PeerId, MAINNET, B256, PruneModes}; @@ -28,6 +27,7 @@ //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; +//! # use reth_consensus::Consensus; //! # //! # let chain_spec = MAINNET.clone(); //! # let consensus: Arc = Arc::new(TestConsensus::default()); diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 833f2af8e..4f04e9b10 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -58,10 +58,10 @@ use crate::{ StageSet, StageSetBuilder, }; use reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::database::Database; -use reth_interfaces::{ - consensus::Consensus, - p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}, +use reth_interfaces::p2p::{ + bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, }; use reth_provider::{ExecutorFactory, HeaderSyncGapProvider, HeaderSyncMode}; use std::sync::Arc; diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index a862d4afc..f90149e1a 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -1,6 +1,7 @@ use futures_util::StreamExt; use reth_codecs::Compact; use 
reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, @@ -10,7 +11,6 @@ use reth_db::{ }; use reth_etl::Collector; use reth_interfaces::{ - consensus::Consensus, p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}, provider::ProviderError, }; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 9b4eec87f..bfdb9782b 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -1,10 +1,10 @@ use reth_codecs::Compact; +use reth_consensus::ConsensusError; use reth_db::{ database::Database, tables, transaction::{DbTx, DbTxMut}, }; -use reth_interfaces::consensus; use reth_primitives::{ stage::{EntitiesCheckpoint, MerkleCheckpoint, StageCheckpoint, StageId}, trie::StoredSubNode, @@ -327,7 +327,7 @@ fn validate_state_root( } else { warn!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root"); Err(StageError::Block { - error: BlockErrorKind::Validation(consensus::ConsensusError::BodyStateRootDiff( + error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( GotExpected { got, expected: expected.state_root }.into(), )), block: Box::new(expected), diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 04a30cb2e..5ddb2dfc0 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -1,3 +1,4 @@ +use reth_consensus::ConsensusError; use reth_db::{ cursor::DbCursorRW, database::Database, @@ -6,7 +7,6 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, RawValue, }; -use reth_interfaces::consensus; use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, Address, PruneSegment, StaticFileSegment, TransactionSignedNoHash, TxNumber, @@ -209,7 +209,7 @@ fn recover_range( Err(StageError::Block { block: Box::new(sealed_header), error: 
BlockErrorKind::Validation( - consensus::ConsensusError::TransactionSignerRecoveryError, + ConsensusError::TransactionSignerRecoveryError, ), }) } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 6f9305e88..526cbdaa5 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-interfaces.workspace = true +reth-rpc-types.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index f696c86d7..b3011a9b0 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -17,7 +17,6 @@ use reth_interfaces::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }, - consensus::ForkchoiceState, provider::ProviderResult, RethResult, }; @@ -61,6 +60,7 @@ use chain_info::ChainInfoTracker; mod consistent_view; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; +use reth_rpc_types::engine::ForkchoiceState; /// The main type for interacting with the blockchain. /// diff --git a/crates/storage/provider/src/traits/chain_info.rs b/crates/storage/provider/src/traits/chain_info.rs index 82d879df4..5e6379f01 100644 --- a/crates/storage/provider/src/traits/chain_info.rs +++ b/crates/storage/provider/src/traits/chain_info.rs @@ -1,5 +1,5 @@ -use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::SealedHeader; +use reth_rpc_types::engine::ForkchoiceState; use std::time::Instant; /// A type that can track updates related to fork choice updates. 
From eabd0220c6a27e0b3ad3292a7c9534ee4ea2c365 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 23 Apr 2024 14:24:58 +0200 Subject: [PATCH 008/250] feat: support short issue links in label workflow (#7814) --- .github/scripts/label_pr.js | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/.github/scripts/label_pr.js b/.github/scripts/label_pr.js index c01f4c98a..16ace2db0 100644 --- a/.github/scripts/label_pr.js +++ b/.github/scripts/label_pr.js @@ -8,6 +8,20 @@ function shouldIncludeLabel (label) { return !isStatus && !isTrackingIssue && !isPreventStale && !isDifficulty; } +// Get the issue number from an issue link in the forms `<keyword> <issue url>` or `<keyword> #<issue number>`. +function getIssueLink (repoUrl, body) { + const urlPattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?<issue_number>\\d+)`, 'i') + const issuePattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) \#(?<issue_number>\\d+)`, 'i') + + const urlRe = body.match(urlPattern); + const issueRe = body.match(issuePattern); + if (urlRe?.groups?.issue_number) { + return urlRe.groups.issue_number + } else { + return issueRe?.groups?.issue_number + } +} + module.exports = async ({ github, context }) => { try { const prNumber = context.payload.pull_request.number; @@ -15,11 +29,7 @@ module.exports = async ({ github, context }) => { const repo = context.repo; const repoUrl = context.payload.repository.html_url; - const pattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?<issue_number>\\d+)`, 'i') - - const re = prBody.match(pattern); - const issueNumber = re?.groups?.issue_number; - + const issueNumber = getIssueLink(repoUrl, prBody); if (!issueNumber) { console.log('No issue reference found in PR description.'); return; From 672e4c512cdd99b6fcfb8dec9940a5150d5b16a7 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 23 Apr 2024 20:26:57 +0800 Subject: [PATCH 009/250] chore(bin/stage): add explicit doc 
of the use of to-block and hashing stage (#7805) Signed-off-by: jsvisa Co-authored-by: Oliver Nordbjerg --- bin/reth/src/commands/stage/unwind.rs | 9 +++++++-- crates/node-core/src/args/stage_args.rs | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index a1fe7d8a0..2682683d4 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -236,10 +236,12 @@ impl Command { /// `reth stage unwind` subcommand #[derive(Subcommand, Debug, Eq, PartialEq)] enum Subcommands { - /// Unwinds the database until the given block number (range is inclusive). + /// Unwinds the database from the latest block, until the given block number or hash has been + /// reached, that block is not included. #[command(name = "to-block")] ToBlock { target: BlockHashOrNumber }, - /// Unwinds the given number of blocks from the database. + /// Unwinds the database from the latest block, until the given number of blocks have been + /// reached. #[command(name = "num-blocks")] NumBlocks { amount: u64 }, } @@ -263,6 +265,9 @@ impl Subcommands { }, Subcommands::NumBlocks { amount } => last.saturating_sub(*amount), } + 1; + if target > last { + eyre::bail!("Target block number is higher than the latest block number") + } Ok(target..=last) } } diff --git a/crates/node-core/src/args/stage_args.rs b/crates/node-core/src/args/stage_args.rs index d90eabcfc..337f5a4a6 100644 --- a/crates/node-core/src/args/stage_args.rs +++ b/crates/node-core/src/args/stage_args.rs @@ -30,11 +30,11 @@ pub enum StageEnum { /// /// Manages operations related to hashing storage data. StorageHashing, - /// The hashing stage within the pipeline. + /// The account and storage hashing stages within the pipeline. /// /// Covers general data hashing operations. Hashing, - /// The Merkle stage within the pipeline. + /// The merkle stage within the pipeline. 
/// /// Handles Merkle tree-related computations and data processing. Merkle, From 9fd35f948c65325447d1d7dced207842d2f2c782 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 23 Apr 2024 14:42:51 +0200 Subject: [PATCH 010/250] chore: move TestConensus to reth-consensus (#7813) --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/consensus/beacon/Cargo.toml | 2 +- .../consensus/beacon/src/engine/test_utils.rs | 4 +- crates/consensus/consensus/Cargo.toml | 5 +- crates/consensus/consensus/src/lib.rs | 4 + crates/consensus/consensus/src/test_utils.rs | 70 +++++++++++++ crates/interfaces/Cargo.toml | 4 +- crates/interfaces/src/p2p/full_block.rs | 2 +- crates/interfaces/src/test_utils/headers.rs | 97 +++---------------- crates/net/downloaders/Cargo.toml | 3 +- crates/net/downloaders/src/bodies/bodies.rs | 3 +- crates/net/downloaders/src/bodies/request.rs | 3 +- crates/net/downloaders/src/bodies/task.rs | 3 +- crates/net/downloaders/src/file_client.rs | 10 +- .../src/headers/reverse_headers.rs | 3 +- crates/net/downloaders/src/headers/task.rs | 3 +- crates/stages/Cargo.toml | 1 + crates/stages/src/lib.rs | 3 +- crates/stages/src/stages/headers.rs | 4 +- 19 files changed, 122 insertions(+), 104 deletions(-) create mode 100644 crates/consensus/consensus/src/test_utils.rs diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 799e7e343..08f588cd2 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1259,8 +1259,8 @@ mod tests { use super::*; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; + use reth_consensus::test_utils::TestConsensus; use reth_db::{tables, test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv}; - use reth_interfaces::test_utils::TestConsensus; use reth_node_ethereum::EthEvmConfig; #[cfg(not(feature = "optimism"))] use reth_primitives::proofs::calculate_receipt_root; diff --git 
a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 0fed12597..9a7841447 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -45,6 +45,7 @@ schnellru.workspace = true # reth reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["test-utils"] } +reth-consensus = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } reth-blockchain-tree = { workspace = true, features = ["test-utils"] } @@ -57,7 +58,6 @@ reth-downloaders.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-config.workspace = true -reth-consensus.workspace = true assert_matches.workspace = true diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 297269975..ff36e871b 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -7,7 +7,7 @@ use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; use reth_config::config::EtlConfig; -use reth_consensus::Consensus; +use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -19,7 +19,7 @@ use reth_interfaces::{ executor::BlockExecutionError, p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, sync::NoopSyncStateUpdater, - test_utils::{NoopFullBlockClient, TestConsensus}, + test_utils::NoopFullBlockClient, }; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{BlockNumber, ChainSpec, FinishedExExHeight, PruneModes, B256}; diff --git 
a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 6e4fc7ee9..308a16f20 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -15,4 +15,7 @@ reth-primitives.workspace = true # misc auto_impl.workspace = true -thiserror.workspace = true \ No newline at end of file +thiserror.workspace = true + +[features] +test-utils = [] \ No newline at end of file diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index ab2453b74..b434272a4 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -14,6 +14,10 @@ use reth_primitives::{ }; use std::fmt::Debug; +#[cfg(any(test, feature = "test-utils"))] +/// test helpers for mocking consensus +pub mod test_utils; + /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] pub trait Consensus: Debug + Send + Sync { diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs new file mode 100644 index 000000000..a8655661b --- /dev/null +++ b/crates/consensus/consensus/src/test_utils.rs @@ -0,0 +1,70 @@ +use crate::{Consensus, ConsensusError}; +use reth_primitives::{Header, SealedBlock, SealedHeader, U256}; +use std::sync::atomic::{AtomicBool, Ordering}; + +/// Consensus engine implementation for testing +#[derive(Debug)] +pub struct TestConsensus { + /// Flag whether the header validation should purposefully fail + fail_validation: AtomicBool, +} + +impl Default for TestConsensus { + fn default() -> Self { + Self { fail_validation: AtomicBool::new(false) } + } +} + +impl TestConsensus { + /// Get the failed validation flag. + pub fn fail_validation(&self) -> bool { + self.fail_validation.load(Ordering::SeqCst) + } + + /// Update the validation flag. 
+ pub fn set_fail_validation(&self, val: bool) { + self.fail_validation.store(val, Ordering::SeqCst) + } +} + +impl Consensus for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_header_against_parent( + &self, + _header: &SealedHeader, + _parent: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_header_with_total_difficulty( + &self, + _header: &Header, + _total_difficulty: U256, + ) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } +} diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 70ac2f942..6c066593b 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -34,12 +34,14 @@ parking_lot = { workspace = true, optional = true } rand = { workspace = true, optional = true } [dev-dependencies] +reth-consensus = { workspace = true, features = ["test-utils"] } + parking_lot.workspace = true rand.workspace = true tokio = { workspace = true, features = ["full"] } secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] -test-utils = ["secp256k1", "rand", "parking_lot"] +test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"] cli = ["clap"] optimism = ["reth-eth-wire-types/optimism"] diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index cb4c66543..6cf3f2c81 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -36,7 +36,7 @@ impl FullBlockClient { /// 
Returns a client with Test consensus #[cfg(any(test, feature = "test-utils"))] pub fn test_client(client: Client) -> Self { - Self::new(client, Arc::new(crate::test_utils::TestConsensus::default())) + Self::new(client, Arc::new(reth_consensus::test_utils::TestConsensus::default())) } } diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index c0da9ff16..304f394c8 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -1,5 +1,18 @@ //! Testing support for headers related interfaces. +use std::{ + fmt, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + task::{ready, Context, Poll}, +}; + +use futures::{Future, FutureExt, Stream, StreamExt}; +use tokio::sync::Mutex; + use crate::p2p::{ download::DownloadClient, error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, @@ -10,21 +23,8 @@ use crate::p2p::{ }, priority::Priority, }; -use futures::{Future, FutureExt, Stream, StreamExt}; -use reth_consensus::{Consensus, ConsensusError}; -use reth_primitives::{ - Header, HeadersDirection, PeerId, SealedBlock, SealedHeader, WithPeerId, U256, -}; -use std::{ - fmt, - pin::Pin, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, - }, - task::{ready, Context, Poll}, -}; -use tokio::sync::Mutex; +use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_primitives::{Header, HeadersDirection, PeerId, SealedHeader, WithPeerId}; /// A test downloader which just returns the values that have been pushed to it. 
#[derive(Debug)] @@ -243,70 +243,3 @@ impl HeadersClient for TestHeadersClient { }) } } - -/// Consensus engine implementation for testing -#[derive(Debug)] -pub struct TestConsensus { - /// Flag whether the header validation should purposefully fail - fail_validation: AtomicBool, -} - -impl Default for TestConsensus { - fn default() -> Self { - Self { fail_validation: AtomicBool::new(false) } - } -} - -impl TestConsensus { - /// Get the failed validation flag. - pub fn fail_validation(&self) -> bool { - self.fail_validation.load(Ordering::SeqCst) - } - - /// Update the validation flag. - pub fn set_fail_validation(&self, val: bool) { - self.fail_validation.store(val, Ordering::SeqCst) - } -} - -impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_header_against_parent( - &self, - _header: &SealedHeader, - _parent: &SealedHeader, - ) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_header_with_total_difficulty( - &self, - _header: &Header, - _total_difficulty: U256, - ) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } -} diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index b5de192f2..f1f14c85c 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -45,6 +45,7 @@ itertools.workspace = true [dev-dependencies] reth-db = { workspace = true, features = ["test-utils"] } +reth-consensus = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features 
= ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true @@ -58,5 +59,5 @@ rand.workspace = true tempfile.workspace = true [features] -test-utils = ["dep:tempfile", "reth-db/test-utils", "reth-interfaces/test-utils"] +test-utils = ["dep:tempfile", "reth-db/test-utils", "reth-consensus/test-utils", "reth-interfaces/test-utils"] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 28b43359e..985c545e9 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -604,8 +604,9 @@ mod tests { test_utils::{generate_bodies, TestBodiesClient}, }; use assert_matches::assert_matches; + use reth_consensus::test_utils::TestConsensus; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; - use reth_interfaces::test_utils::{generators, generators::random_block_range, TestConsensus}; + use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{BlockBody, B256, MAINNET}; use reth_provider::ProviderFactory; use std::collections::HashMap; diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 1a87928d5..d6da2444c 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -250,7 +250,8 @@ mod tests { bodies::test_utils::zip_blocks, test_utils::{generate_bodies, TestBodiesClient}, }; - use reth_interfaces::test_utils::{generators, generators::random_header_range, TestConsensus}; + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::test_utils::{generators, generators::random_header_range}; /// Check if future returns empty bodies without dispatching any requests. 
#[tokio::test] diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 2d9bb3f96..f8815bcb0 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -170,7 +170,8 @@ mod tests { test_utils::{generate_bodies, TestBodiesClient}, }; use assert_matches::assert_matches; - use reth_interfaces::{p2p::error::DownloadError, test_utils::TestConsensus}; + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::p2p::error::DownloadError; use reth_provider::test_utils::create_test_provider_factory; use std::sync::Arc; diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 7d29cc577..b5b7aceae 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -434,12 +434,10 @@ mod tests { use assert_matches::assert_matches; use futures_util::stream::StreamExt; use rand::Rng; - use reth_interfaces::{ - p2p::{ - bodies::downloader::BodyDownloader, - headers::downloader::{HeaderDownloader, SyncTarget}, - }, - test_utils::TestConsensus, + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_provider::test_utils::create_test_provider_factory; use std::{mem, sync::Arc}; diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 8d2318507..5c12a161a 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -1223,7 +1223,8 @@ mod tests { use crate::headers::test_utils::child_header; use assert_matches::assert_matches; - use reth_interfaces::test_utils::{TestConsensus, TestHeadersClient}; + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::test_utils::TestHeadersClient; /// Tests that `replace_number` 
works the same way as Option::replace #[test] diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index 16597342b..aa079dad2 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -183,7 +183,8 @@ mod tests { use crate::headers::{ reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::child_header, }; - use reth_interfaces::test_utils::{TestConsensus, TestHeadersClient}; + use reth_consensus::test_utils::TestConsensus; + use reth_interfaces::test_utils::TestHeadersClient; use std::sync::Arc; #[tokio::test(flavor = "multi_thread")] diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 3c4a3d5a1..2692c9410 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -45,6 +45,7 @@ reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils", "mdbx"] } reth-evm-ethereum.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } +reth-consensus = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-revm.workspace = true reth-static-file.workspace = true diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index cf2b8acbe..4b6df9391 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -15,7 +15,7 @@ //! # use std::sync::Arc; //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; -//! # use reth_interfaces::test_utils::{TestBodiesClient, TestConsensus, TestHeadersClient}; +//! # use reth_interfaces::test_utils::{TestBodiesClient, TestHeadersClient}; //! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PeerId, MAINNET, B256, PruneModes}; //! # use reth_stages::Pipeline; @@ -28,6 +28,7 @@ //! # use reth_static_file::StaticFileProducer; //! 
# use reth_config::config::EtlConfig; //! # use reth_consensus::Consensus; +//! # use reth_consensus::test_utils::TestConsensus; //! # //! # let chain_spec = MAINNET.clone(); //! # let consensus: Arc = Arc::new(TestConsensus::default()); diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index f90149e1a..83dd710d0 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -371,13 +371,13 @@ mod tests { mod test_runner { use super::*; use crate::test_utils::{TestRunnerError, TestStageDB}; + use reth_consensus::test_utils::TestConsensus; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; use reth_interfaces::test_utils::{ - generators, generators::random_header_range, TestConsensus, TestHeaderDownloader, - TestHeadersClient, + generators, generators::random_header_range, TestHeaderDownloader, TestHeadersClient, }; use reth_provider::BlockNumReader; use tokio::sync::watch; From b3db4cf56d3488cdad3f655c725ccf747aa939bd Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 23 Apr 2024 15:30:38 +0100 Subject: [PATCH 011/250] feat: add UX improvements on e2e testing (#7804) --- Cargo.lock | 5 + crates/consensus/beacon/Cargo.toml | 3 + crates/e2e-test-utils/Cargo.toml | 3 + crates/e2e-test-utils/src/engine_api.rs | 28 ++++- crates/e2e-test-utils/src/lib.rs | 94 ++++++++++++++++ crates/e2e-test-utils/src/node.rs | 121 ++++++++++++++++----- crates/e2e-test-utils/src/payload.rs | 2 +- crates/e2e-test-utils/src/wallet.rs | 9 +- crates/node-ethereum/tests/e2e/dev.rs | 38 ++----- crates/node-ethereum/tests/e2e/eth.rs | 48 +++----- crates/node-ethereum/tests/e2e/p2p.rs | 77 ++++--------- crates/node-ethereum/tests/e2e/utils.rs | 5 + crates/optimism/node/Cargo.toml | 3 +- crates/optimism/node/tests/e2e/p2p.rs | 82 +++----------- 
crates/optimism/node/tests/e2e/utils.rs | 44 +++++++- crates/storage/provider/src/traits/full.rs | 4 +- 16 files changed, 344 insertions(+), 222 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6802524cd..3bd9ff743 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6176,6 +6176,7 @@ dependencies = [ "reth-provider", "reth-prune", "reth-revm", + "reth-rpc", "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", @@ -6451,16 +6452,19 @@ dependencies = [ "rand 0.8.5", "reth", "reth-db", + "reth-node-builder", "reth-node-core", "reth-node-ethereum", "reth-payload-builder", "reth-primitives", + "reth-provider", "reth-rpc", "reth-tracing", "secp256k1 0.27.0", "serde_json", "tokio", "tokio-stream", + "tracing", ] [[package]] @@ -7039,6 +7043,7 @@ dependencies = [ "reqwest 0.11.27", "reth", "reth-basic-payload-builder", + "reth-beacon-consensus", "reth-db", "reth-e2e-test-utils", "reth-evm", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 9a7841447..38dd772af 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -52,6 +52,7 @@ reth-blockchain-tree = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true +reth-rpc.workspace = true reth-tracing.workspace = true reth-revm.workspace = true reth-downloaders.workspace = true @@ -68,4 +69,6 @@ optimism = [ "reth-provider/optimism", "reth-blockchain-tree/optimism", "reth-beacon-consensus-core/optimism", + "reth-revm/optimism", + "reth-rpc/optimism" ] diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index f32ff029c..96b4ca2e6 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -17,6 +17,8 @@ reth-tracing.workspace = true reth-db.workspace = true reth-rpc.workspace = true reth-payload-builder = { workspace = true, features = 
["test-utils"] } +reth-provider.workspace = true +reth-node-builder.workspace = true jsonrpsee.workspace = true @@ -32,3 +34,4 @@ alloy-signer-wallet = { workspace = true, features = ["mnemonic"] } alloy-rpc-types.workspace = true alloy-network.workspace = true alloy-consensus.workspace = true +tracing.workspace = true \ No newline at end of file diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index ec8b058a3..fe05b0b68 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -3,7 +3,10 @@ use jsonrpsee::http_client::HttpClient; use reth::{ api::{EngineTypes, PayloadBuilderAttributes}, providers::CanonStateNotificationStream, - rpc::{api::EngineApiClient, types::engine::ForkchoiceState}, + rpc::{ + api::EngineApiClient, + types::engine::{ForkchoiceState, PayloadStatusEnum}, + }, }; use reth_payload_builder::PayloadId; use reth_primitives::B256; @@ -30,6 +33,7 @@ impl EngineApiHelper { &self, payload: E::BuiltPayload, payload_builder_attributes: E::PayloadBuilderAttributes, + expected_status: PayloadStatusEnum, ) -> eyre::Result where E::ExecutionPayloadV3: From + PayloadEnvelopeExt, @@ -45,8 +49,10 @@ impl EngineApiHelper { payload_builder_attributes.parent_beacon_block_root().unwrap(), ) .await?; - assert!(submission.is_valid(), "{}", submission); - Ok(submission.latest_valid_hash.unwrap()) + + assert!(submission.status == expected_status); + + Ok(submission.latest_valid_hash.unwrap_or_default()) } /// Sends forkchoice update to the engine api @@ -64,4 +70,20 @@ impl EngineApiHelper { Ok(()) } + + /// Sends forkchoice update to the engine api with a zero finalized hash + pub async fn update_optimistic_forkchoice(&self, hash: B256) -> eyre::Result<()> { + EngineApiClient::::fork_choice_updated_v2( + &self.engine_api_client, + ForkchoiceState { + head_block_hash: hash, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }, + None, + ) + .await?; + + Ok(()) + 
} } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 016fb4d3e..043d1e0c6 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,3 +1,22 @@ +use node::NodeHelper; +use reth::{ + args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, + blockchain_tree::ShareableBlockchainTree, + builder::{NodeBuilder, NodeConfig, NodeHandle}, + revm::EvmProcessorFactory, + tasks::TaskManager, +}; +use reth_db::{test_utils::TempDatabase, DatabaseEnv}; +use reth_node_builder::{ + components::{NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + FullNodeComponentsAdapter, FullNodeTypesAdapter, NodeTypes, +}; +use reth_primitives::ChainSpec; +use reth_provider::providers::BlockchainProvider; +use std::sync::Arc; +use tracing::{span, Level}; +use wallet::Wallet; + /// Wrapper type to create test nodes pub mod node; @@ -15,3 +34,78 @@ mod engine_api; /// Helper traits mod traits; + +/// Creates the initial setup with `num_nodes` started and interconnected. 
+pub async fn setup( + num_nodes: usize, + chain_spec: Arc, + is_dev: bool, +) -> eyre::Result<(Vec>, TaskManager, Wallet)> +where + N: Default + reth_node_builder::Node>, + N::PoolBuilder: PoolBuilder>, + N::NetworkBuilder: NetworkBuilder, TmpPool>, + N::PayloadBuilder: PayloadServiceBuilder, TmpPool>, +{ + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Create nodes and peer them + let mut nodes: Vec> = Vec::with_capacity(num_nodes); + + for idx in 0..num_nodes { + let mut node_config = NodeConfig::test() + .with_chain(chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + + if is_dev { + node_config = node_config.dev(); + } + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(Default::default()) + .launch() + .await?; + + let mut node = NodeHelper::new(node).await?; + + // Connect each node in a chain. 
+ if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } + + // Connect last node with the first if there are more than two + if idx + 1 == num_nodes && num_nodes > 2 { + if let Some(first_node) = nodes.first_mut() { + node.connect(first_node).await; + } + } + + nodes.push(node); + } + + Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) +} + +// Type aliases + +type TmpDB = Arc>; +type EvmType = EvmProcessorFactory<::Evm>; +type RethProvider = BlockchainProvider>>; +type TmpPool = <>>::PoolBuilder as PoolBuilder< + TmpNodeAdapter, +>>::Pool; +type TmpNodeAdapter = FullNodeTypesAdapter>; + +/// Type alias for a type of NodeHelper +pub type NodeHelperType = NodeHelper, TmpPool>>; diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index d88a428f0..18d147fd9 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -4,21 +4,18 @@ use crate::{ }; use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; +use futures_util::Future; use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, builder::FullNode, - providers::{BlockReaderIdExt, CanonStateSubscriptions}, + providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, rpc::{ eth::{error::EthResult, EthTransactions}, - types::engine::PayloadAttributes, + types::engine::PayloadStatusEnum, }, }; -use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, BlockNumber, Bytes, B256}; -use std::{ - marker::PhantomData, - time::{SystemTime, UNIX_EPOCH}, -}; +use reth_primitives::{stage::StageId, BlockHash, BlockNumber, Bytes, B256}; +use std::{marker::PhantomData, pin::Pin}; use tokio_stream::StreamExt; /// An helper struct to handle node actions @@ -27,7 +24,7 @@ where Node: FullNodeComponents, { pub inner: FullNode, - payload: PayloadHelper, + pub payload: PayloadHelper, pub network: NetworkHelper, pub 
engine_api: EngineApiHelper, } @@ -52,12 +49,53 @@ where }) } - /// Advances the node forward + pub async fn connect(&mut self, node: &mut NodeHelper) { + self.network.add_peer(node.network.record()).await; + node.network.add_peer(self.network.record()).await; + node.network.expect_session().await; + self.network.expect_session().await; + } + + /// Advances the chain `length` blocks. + /// + /// Returns the added chain as a Vec of block hashes. pub async fn advance( + &mut self, + length: u64, + tx_generator: impl Fn() -> Pin>>, + attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes + + Copy, + ) -> eyre::Result< + Vec<( + ::BuiltPayload, + ::PayloadBuilderAttributes, + )>, + > + where + ::ExecutionPayloadV3: + From<::BuiltPayload> + PayloadEnvelopeExt, + { + let mut chain = Vec::with_capacity(length as usize); + for _ in 0..length { + let (payload, _) = + self.advance_block(tx_generator().await, attributes_generator).await?; + chain.push(payload); + } + Ok(chain) + } + + /// Advances the node forward one block + pub async fn advance_block( &mut self, raw_tx: Bytes, attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, - ) -> eyre::Result<(B256, B256)> + ) -> eyre::Result<( + ( + ::BuiltPayload, + ::PayloadBuilderAttributes, + ), + B256, + )> where ::ExecutionPayloadV3: From<::BuiltPayload> + PayloadEnvelopeExt, @@ -81,15 +119,54 @@ where let payload = self.payload.expect_built_payload().await?; // submit payload via engine api - let block_number = payload.block().number; - let block_hash = self.engine_api.submit_payload(payload, eth_attr.clone()).await?; + let block_hash = self + .engine_api + .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) + .await?; // trigger forkchoice update via engine api to commit the block to the blockchain self.engine_api.update_forkchoice(block_hash).await?; // assert the block has been committed to the blockchain - self.assert_new_block(tx_hash, block_hash, block_number).await?; - 
Ok((block_hash, tx_hash)) + self.assert_new_block(tx_hash, block_hash, payload.block().number).await?; + Ok(((payload, eth_attr), tx_hash)) + } + + /// Waits for block to be available on node. + pub async fn wait_block( + &self, + number: BlockNumber, + expected_block_hash: BlockHash, + wait_finish_checkpoint: bool, + ) -> eyre::Result<()> { + let mut check = !wait_finish_checkpoint; + loop { + tokio::time::sleep(std::time::Duration::from_millis(20)).await; + + if !check && wait_finish_checkpoint { + if let Some(checkpoint) = + self.inner.provider.get_stage_checkpoint(StageId::Finish)? + { + if checkpoint.block_number >= number { + check = true + } + } + } + + if check { + if let Some(latest_block) = self.inner.provider.block_by_number(number)? { + if latest_block.hash_slow() != expected_block_hash { + // TODO: only if its awaiting a reorg + continue + } + break + } + if wait_finish_checkpoint { + panic!("Finish checkpoint matches, but could not fetch block."); + } + } + } + Ok(()) } /// Injects a raw transaction into the node tx pool via RPC server @@ -129,17 +206,3 @@ where Ok(()) } } - -/// Helper function to create a new eth payload attributes -pub fn eth_payload_attributes() -> EthPayloadBuilderAttributes { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); - - let attributes = PayloadAttributes { - timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: Address::ZERO, - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - EthPayloadBuilderAttributes::new(B256::ZERO, attributes) -} diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 37138cdd3..2d349721b 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -7,7 +7,7 @@ use tokio_stream::wrappers::BroadcastStream; pub struct PayloadHelper { pub payload_event_stream: BroadcastStream>, payload_builder: PayloadBuilderHandle, - timestamp: u64, + pub timestamp: u64, 
} impl PayloadHelper { diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index 43fe7555d..d064eede9 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -1,11 +1,11 @@ use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; use alloy_rpc_types::{TransactionInput, TransactionRequest}; use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; -use reth_primitives::{Address, Bytes, U256}; +use reth_primitives::{hex, Address, Bytes, U256}; /// One of the accounts of the genesis allocations. pub struct Wallet { inner: LocalWallet, - nonce: u64, + pub nonce: u64, chain_id: u64, } @@ -27,6 +27,11 @@ impl Wallet { self.tx(None).await } + pub async fn optimism_l1_block_info_tx(&mut self) -> Bytes { + let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); + self.tx(Some(l1_block_info)).await + } + /// Creates a transaction with data and signs it pub async fn tx(&mut self, data: Option) -> Bytes { let tx = TransactionRequest { diff --git a/crates/node-ethereum/tests/e2e/dev.rs b/crates/node-ethereum/tests/e2e/dev.rs index b096bda5a..4570a8c0e 100644 --- a/crates/node-ethereum/tests/e2e/dev.rs +++ b/crates/node-ethereum/tests/e2e/dev.rs @@ -1,43 +1,27 @@ -use 
futures_util::StreamExt; -use reth::{ - api::FullNodeComponents, - builder::{FullNode, NodeBuilder, NodeHandle}, - providers::CanonStateSubscriptions, - rpc::eth::EthTransactions, - tasks::TaskManager, -}; -use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::EthereumNode; +use crate::utils::EthNode; +use futures::StreamExt; +use reth::rpc::eth::EthTransactions; +use reth_e2e_test_utils::setup; use reth_primitives::{b256, hex, ChainSpec, Genesis}; +use reth_provider::CanonStateSubscriptions; use std::sync::Arc; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { - let tasks = TaskManager::current(); + reth_tracing::init_test_tracing(); + let (mut nodes, _tasks, _) = setup(1, custom_chain(), true).await?; - // create node config - let node_config = NodeConfig::test() - .dev() - .with_rpc(RpcServerArgs::default().with_http().with_unused_ports()) - .with_chain(custom_chain()); - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(tasks.executor()) - .node(EthereumNode::default()) - .launch() - .await?; - - assert_chain_advances(node).await; + assert_chain_advances(nodes.pop().unwrap()).await; Ok(()) } -async fn assert_chain_advances(mut node: FullNode) { - let mut notifications = node.provider.canonical_state_stream(); +async fn assert_chain_advances(mut node: EthNode) { + let mut notifications = node.inner.provider.canonical_state_stream(); // submit tx through rpc let raw_tx = hex!("02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090"); - let eth_api = node.rpc_registry.eth_api(); + let eth_api = node.inner.rpc_registry.eth_api(); let hash = eth_api.send_raw_transaction(raw_tx.into()).await.unwrap(); diff --git a/crates/node-ethereum/tests/e2e/eth.rs 
b/crates/node-ethereum/tests/e2e/eth.rs index 6f9eeb999..39ba5e232 100644 --- a/crates/node-ethereum/tests/e2e/eth.rs +++ b/crates/node-ethereum/tests/e2e/eth.rs @@ -4,7 +4,7 @@ use reth::{ builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; +use reth_e2e_test_utils::{node::NodeHelper, setup, wallet::Wallet}; use reth_node_ethereum::EthereumNode; use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; use std::sync::Arc; @@ -13,38 +13,24 @@ use std::sync::Arc; async fn can_run_eth_node() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let exec = TaskManager::current(); - let exec = exec.executor(); - - // Chain spec with test allocs - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .cancun_activated() - .build(), - ); - - // Node setup - let node_config = NodeConfig::test() - .with_chain(chain_spec) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(EthereumNode::default()) - .launch() - .await?; - let mut node = NodeHelper::new(node).await?; - - // Configure wallet from test mnemonic and create dummy transfer tx - let mut wallet = Wallet::default(); + let (mut nodes, _tasks, mut wallet) = setup::( + 1, + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ), + false, + ) + .await?; + + let mut node = nodes.pop().unwrap(); let raw_tx = wallet.transfer_tx().await; // make the node advance - node.advance(raw_tx, eth_payload_attributes).await?; + node.advance_block(raw_tx, eth_payload_attributes).await?; Ok(()) } @@ -83,7 +69,7 @@ async fn 
can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { let raw_tx = wallet.transfer_tx().await; // make the node advance - node.advance(raw_tx, crate::utils::eth_payload_attributes).await?; + node.advance_block(raw_tx, crate::utils::eth_payload_attributes).await?; Ok(()) } diff --git a/crates/node-ethereum/tests/e2e/p2p.rs b/crates/node-ethereum/tests/e2e/p2p.rs index 940096e18..c7ce2a7c1 100644 --- a/crates/node-ethereum/tests/e2e/p2p.rs +++ b/crates/node-ethereum/tests/e2e/p2p.rs @@ -1,71 +1,34 @@ -use std::sync::Arc; - use crate::utils::eth_payload_attributes; -use reth::{ - args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; +use reth_e2e_test_utils::setup; use reth_node_ethereum::EthereumNode; -use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; +use reth_primitives::{ChainSpecBuilder, MAINNET}; +use std::sync::Arc; #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .cancun_activated() - .build(), - ); - - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - let node_config = NodeConfig::test() - .with_chain(chain_spec) - .with_network(network_config) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let (mut nodes, _tasks, mut wallet) = setup::( + 2, + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ), + false, + ) + 
.await?; - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .node(EthereumNode::default()) - .launch() - .await?; - - let mut first_node = NodeHelper::new(node.clone()).await?; - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(EthereumNode::default()) - .launch() - .await?; - - let mut second_node = NodeHelper::new(node).await?; - - let mut wallet = Wallet::default(); let raw_tx = wallet.transfer_tx().await; - - // Make them peer - first_node.network.add_peer(second_node.network.record()).await; - second_node.network.add_peer(first_node.network.record()).await; - - // Make sure they establish a new session - first_node.network.expect_session().await; - second_node.network.expect_session().await; + let mut second_node = nodes.pop().unwrap(); + let mut first_node = nodes.pop().unwrap(); // Make the first node advance - let (block_hash, tx_hash) = first_node.advance(raw_tx.clone(), eth_payload_attributes).await?; + let ((payload, _), tx_hash) = + first_node.advance_block(raw_tx.clone(), eth_payload_attributes).await?; + let block_hash = payload.block().hash(); // only send forkchoice update to second node second_node.engine_api.update_forkchoice(block_hash).await?; diff --git a/crates/node-ethereum/tests/e2e/utils.rs b/crates/node-ethereum/tests/e2e/utils.rs index 52526c45f..2c1dc373b 100644 --- a/crates/node-ethereum/tests/e2e/utils.rs +++ b/crates/node-ethereum/tests/e2e/utils.rs @@ -1,7 +1,12 @@ use reth::rpc::types::engine::PayloadAttributes; +use reth_e2e_test_utils::NodeHelperType; +use reth_node_ethereum::EthereumNode; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_primitives::{Address, B256}; +/// Ethereum Node Helper type +pub(crate) type EthNode = NodeHelperType; + /// Helper function to create a new eth payload attributes pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { let 
attributes = PayloadAttributes { diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 8f10c00d7..f242adf5a 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -28,7 +28,7 @@ reth-network.workspace = true reth-interfaces.workspace = true reth-evm.workspace = true reth-revm.workspace = true - +reth-beacon-consensus.workspace = true revm.workspace = true revm-primitives.workspace = true @@ -67,4 +67,5 @@ optimism = [ "reth-rpc/optimism", "reth-revm/optimism", "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", ] diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 5fe4daa7b..da6af2090 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,80 +1,28 @@ +use crate::utils::{advance_chain, setup}; use std::sync::Arc; - -use crate::utils::optimism_payload_attributes; -use reth::{ - args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; -use reth_node_optimism::node::OptimismNode; -use reth_primitives::{hex, Bytes, ChainSpecBuilder, Genesis, BASE_MAINNET}; +use tokio::sync::Mutex; #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(BASE_MAINNET.chain) - .genesis(genesis) - .ecotone_activated() - .build(), - ); - let mut wallet = Wallet::default().with_chain_id(chain_spec.chain.into()); - - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - let node_config = NodeConfig::test() - 
.with_chain(chain_spec) - .with_network(network_config) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .node(OptimismNode::default()) - .launch() - .await?; - - let mut first_node = NodeHelper::new(node.clone()).await?; - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(OptimismNode::default()) - .launch() - .await?; - - let mut second_node = NodeHelper::new(node).await?; - - // Make them peer - first_node.network.add_peer(second_node.network.record()).await; - second_node.network.add_peer(first_node.network.record()).await; - - // Make sure they establish a new session - first_node.network.expect_session().await; - second_node.network.expect_session().await; + let (mut nodes, _tasks, wallet) = setup(2).await?; + let wallet = Arc::new(Mutex::new(wallet)); - // Taken from optimism tests - let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); + let second_node = nodes.pop().unwrap(); + let mut first_node = nodes.pop().unwrap(); - // Make the first node advance - let raw_tx = wallet.tx(Some(l1_block_info)).await; - let (block_hash, tx_hash) = - 
first_node.advance(raw_tx.clone(), optimism_payload_attributes).await?; + let tip: usize = 300; + let tip_index: usize = tip - 1; - // only send forkchoice update to second node - second_node.engine_api.update_forkchoice(block_hash).await?; + // On first node, create a chain up to block number 300 + let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?; + let canonical_chain = + canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); - // expect second node advanced via p2p gossip - second_node.assert_new_block(tx_hash, block_hash, 1).await?; + // On second node, sync up to block number 300 + second_node.engine_api.update_forkchoice(canonical_chain[tip_index]).await?; + second_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; Ok(()) } diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 1f655502e..5322cad9a 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,7 +1,45 @@ -use reth::rpc::types::engine::PayloadAttributes; -use reth_node_optimism::OptimismPayloadBuilderAttributes; +use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType}; +use reth_node_optimism::{OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, B256}; +use reth_primitives::{Address, ChainSpecBuilder, Genesis, B256, BASE_MAINNET}; +use std::sync::Arc; +use tokio::sync::Mutex; + +/// Optimism Node Helper type +pub(crate) type OpNode = NodeHelperType; + +pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + reth_e2e_test_utils::setup( + num_nodes, + Arc::new( + ChainSpecBuilder::default() + .chain(BASE_MAINNET.chain) + 
.genesis(genesis) + .ecotone_activated() + .build(), + ), + false, + ) + .await +} + +pub(crate) async fn advance_chain( + length: usize, + node: &mut OpNode, + wallet: Arc>, +) -> eyre::Result> { + node.advance( + length as u64, + || { + let wallet = wallet.clone(); + Box::pin(async move { wallet.lock().await.optimism_l1_block_info_tx().await }) + }, + optimism_payload_attributes, + ) + .await +} /// Helper function to create a new eth payload attributes pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuilderAttributes { diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index e73357f4a..78ef74085 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -2,7 +2,7 @@ use crate::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, EvmEnvProvider, StateProviderFactory, + DatabaseProviderFactory, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, }; use reth_db::database::Database; @@ -16,6 +16,7 @@ pub trait FullProvider: + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions + + StageCheckpointReader + Clone + Unpin + 'static @@ -31,6 +32,7 @@ impl FullProvider for T where + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions + + StageCheckpointReader + Clone + Unpin + 'static From 00ca9cd0241e639bdf991436d0166ca086694437 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 23 Apr 2024 16:50:14 +0200 Subject: [PATCH 012/250] feat: add prague helpers to spec (#7817) --- crates/primitives/src/chain/spec.rs | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index ee732a9bc..fb657b5f9 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -813,6 +813,15 @@ impl ChainSpec { 
.unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)) } + /// Convenience method to check if [Hardfork::Prague] is active at a given timestamp. + #[inline] + pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork_timestamps + .prague + .map(|prague| timestamp >= prague) + .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp)) + } + /// Convenience method to check if [Hardfork::Byzantium] is active at a given block number. #[inline] pub fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { @@ -1055,10 +1064,12 @@ impl From for ChainSpec { /// Various timestamps of forks #[derive(Debug, Clone, Default, Eq, PartialEq)] pub struct ForkTimestamps { - /// The timestamp of the shanghai fork + /// The timestamp of the Shanghai fork pub shanghai: Option, - /// The timestamp of the cancun fork + /// The timestamp of the Cancun fork pub cancun: Option, + /// The timestamp of the Prague fork + pub prague: Option, /// The timestamp of the Regolith fork #[cfg(feature = "optimism")] pub regolith: Option, @@ -1080,6 +1091,9 @@ impl ForkTimestamps { if let Some(cancun) = forks.get(&Hardfork::Cancun).and_then(|f| f.as_timestamp()) { timestamps = timestamps.cancun(cancun); } + if let Some(prague) = forks.get(&Hardfork::Prague).and_then(|f| f.as_timestamp()) { + timestamps = timestamps.prague(prague); + } #[cfg(feature = "optimism")] { if let Some(regolith) = forks.get(&Hardfork::Regolith).and_then(|f| f.as_timestamp()) { @@ -1095,18 +1109,24 @@ impl ForkTimestamps { timestamps } - /// Sets the given shanghai timestamp + /// Sets the given Shanghai timestamp pub fn shanghai(mut self, shanghai: u64) -> Self { self.shanghai = Some(shanghai); self } - /// Sets the given cancun timestamp + /// Sets the given Cancun timestamp pub fn cancun(mut self, cancun: u64) -> Self { self.cancun = Some(cancun); self } + /// Sets the given Prague timestamp + pub fn prague(mut self, prague: u64) -> Self { + 
self.prague = Some(prague); + self + } + /// Sets the given regolith timestamp #[cfg(feature = "optimism")] pub fn regolith(mut self, regolith: u64) -> Self { From ee1c811c43e9f2dd136f07d28deac6224d6c010d Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Tue, 23 Apr 2024 23:08:32 +0800 Subject: [PATCH 013/250] feat: integrate RpcMiddleware in IPC (#7790) --- Cargo.lock | 2 + crates/ethereum/evm/src/execute.rs | 1 - crates/node-core/src/args/rpc_server_args.rs | 2 +- crates/node-core/src/cli/config.rs | 2 +- crates/optimism/node/src/evm/execute.rs | 1 - crates/rpc/ipc/Cargo.toml | 2 + crates/rpc/ipc/src/server/future.rs | 2 +- crates/rpc/ipc/src/server/ipc.rs | 196 +++--------- crates/rpc/ipc/src/server/mod.rs | 311 ++++++++++++++++--- crates/rpc/ipc/src/server/rpc_service.rs | 138 ++++++++ crates/rpc/rpc-builder/src/auth.rs | 6 +- crates/rpc/rpc-builder/src/lib.rs | 17 +- 12 files changed, 483 insertions(+), 197 deletions(-) create mode 100644 crates/rpc/ipc/src/server/rpc_service.rs diff --git a/Cargo.lock b/Cargo.lock index 3bd9ff743..9e34b6c61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6686,9 +6686,11 @@ dependencies = [ "async-trait", "bytes", "futures", + "futures-util", "jsonrpsee", "parity-tokio-ipc", "pin-project", + "reth-tracing", "serde_json", "thiserror", "tokio", diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index b23c35cfd..64b69d1e5 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -465,7 +465,6 @@ where #[cfg(test)] mod tests { use super::*; - use crate::EthEvmConfig; use reth_primitives::{ bytes, constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server_args.rs index b12f2740a..da3095815 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server_args.rs @@ -437,7 +437,7 @@ impl RethRpcConfig for RpcServerArgs { 
.max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get()) } - fn ipc_server_builder(&self) -> IpcServerBuilder { + fn ipc_server_builder(&self) -> IpcServerBuilder { IpcServerBuilder::default() .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get()) .max_request_body_size(self.rpc_max_request_size_bytes()) diff --git a/crates/node-core/src/cli/config.rs b/crates/node-core/src/cli/config.rs index 988ef34d5..1bce398ef 100644 --- a/crates/node-core/src/cli/config.rs +++ b/crates/node-core/src/cli/config.rs @@ -49,7 +49,7 @@ pub trait RethRpcConfig { fn http_ws_server_builder(&self) -> ServerBuilder; /// Returns the default ipc server builder - fn ipc_server_builder(&self) -> IpcServerBuilder; + fn ipc_server_builder(&self) -> IpcServerBuilder; /// Creates the [RpcServerConfig] from cli args. fn rpc_server_config(&self) -> RpcServerConfig; diff --git a/crates/optimism/node/src/evm/execute.rs b/crates/optimism/node/src/evm/execute.rs index cca13fb7d..f51c6cd3b 100644 --- a/crates/optimism/node/src/evm/execute.rs +++ b/crates/optimism/node/src/evm/execute.rs @@ -545,7 +545,6 @@ mod tests { use revm::L1_BLOCK_CONTRACT; use std::{collections::HashMap, str::FromStr}; - use crate::OptimismEvmConfig; use reth_revm::test_utils::StateProviderTest; fn create_op_state_provider() -> StateProviderTest { diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 8d93a275c..21b645409 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -29,6 +29,8 @@ serde_json.workspace = true tracing.workspace = true bytes.workspace = true thiserror.workspace = true +futures-util = "0.3.30" [dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } +reth-tracing.workspace = true diff --git a/crates/rpc/ipc/src/server/future.rs b/crates/rpc/ipc/src/server/future.rs index 84df306a5..65aaccc88 100644 --- a/crates/rpc/ipc/src/server/future.rs +++ b/crates/rpc/ipc/src/server/future.rs @@ -84,7 +84,7 
@@ where while i < self.futures.len() { if self.futures[i].poll_unpin(cx).is_ready() { - // Using `swap_remove` since we don't care about ordering + // Using `swap_remove` since we don't care about ordering, // but we do care about removing being `O(1)`. // // We don't increment `i` in this branch, since we now diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 8ce4502a2..1fd600c03 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -1,5 +1,7 @@ //! IPC request handling adapted from [`jsonrpsee`] http request handling +use std::sync::Arc; + use futures::{stream::FuturesOrdered, StreamExt}; use jsonrpsee::{ batch_response_error, @@ -8,15 +10,10 @@ use jsonrpsee::{ tracing::server::{rx_log_from_json, tx_log_from_str}, JsonRawValue, }, - server::IdProvider, - types::{ - error::{reject_too_many_subscriptions, ErrorCode}, - ErrorObject, Id, InvalidRequest, Notification, Params, Request, - }, - BatchResponseBuilder, BoundedSubscriptions, CallOrSubscription, MethodCallback, MethodResponse, - MethodSink, Methods, ResponsePayload, SubscriptionState, + server::middleware::rpc::RpcServiceT, + types::{error::ErrorCode, ErrorObject, Id, InvalidRequest, Notification, Request}, + BatchResponseBuilder, MethodResponse, ResponsePayload, }; -use std::{sync::Arc, time::Instant}; use tokio::sync::OwnedSemaphorePermit; use tokio_util::either::Either; use tracing::instrument; @@ -24,42 +21,33 @@ use tracing::instrument; type Notif<'a> = Notification<'a, Option<&'a JsonRawValue>>; #[derive(Debug, Clone)] -pub(crate) struct Batch<'a> { +pub(crate) struct Batch { data: Vec, - call: CallData<'a>, -} - -#[derive(Debug, Clone)] -pub(crate) struct CallData<'a> { - conn_id: usize, - methods: &'a Methods, - id_provider: &'a dyn IdProvider, - sink: &'a MethodSink, - max_response_body_size: u32, - max_log_length: u32, - request_start: Instant, - bounded_subscriptions: BoundedSubscriptions, + rpc_service: S, } // Batch responses 
must be sent back as a single message so we read the results from each // request in the batch and read the results off of a new channel, `rx_batch`, and then send the // complete batch response back to the client over `tx`. #[instrument(name = "batch", skip(b), level = "TRACE")] -pub(crate) async fn process_batch_request(b: Batch<'_>) -> Option { - let Batch { data, call } = b; +pub(crate) async fn process_batch_request( + b: Batch, + max_response_body_size: usize, +) -> Option +where + for<'a> S: RpcServiceT<'a> + Send, +{ + let Batch { data, rpc_service } = b; if let Ok(batch) = serde_json::from_slice::>(&data) { let mut got_notif = false; - let mut batch_response = - BatchResponseBuilder::new_with_limit(call.max_response_body_size as usize); + let mut batch_response = BatchResponseBuilder::new_with_limit(max_response_body_size); let mut pending_calls: FuturesOrdered<_> = batch .into_iter() .filter_map(|v| { if let Ok(req) = serde_json::from_str::>(v.get()) { - Some(Either::Right(async { - execute_call(req, call.clone()).await.into_response() - })) + Some(Either::Right(rpc_service.call(req))) } else if let Ok(_notif) = serde_json::from_str::>(v.get()) { // notifications should not be answered. 
got_notif = true; @@ -95,92 +83,32 @@ pub(crate) async fn process_batch_request(b: Batch<'_>) -> Option { } } -pub(crate) async fn process_single_request( +pub(crate) async fn process_single_request( data: Vec, - call: CallData<'_>, -) -> Option { + rpc_service: &S, +) -> Option +where + for<'a> S: RpcServiceT<'a> + Send, +{ if let Ok(req) = serde_json::from_slice::>(&data) { - Some(execute_call_with_tracing(req, call).await) + Some(execute_call_with_tracing(req, rpc_service).await) } else if serde_json::from_slice::>(&data).is_ok() { None } else { let (id, code) = prepare_error(&data); - Some(CallOrSubscription::Call(MethodResponse::error(id, ErrorObject::from(code)))) + Some(MethodResponse::error(id, ErrorObject::from(code))) } } -#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(call, req), level = "TRACE")] -pub(crate) async fn execute_call_with_tracing<'a>( +#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = "TRACE")] +pub(crate) async fn execute_call_with_tracing<'a, S>( req: Request<'a>, - call: CallData<'_>, -) -> CallOrSubscription { - execute_call(req, call).await -} - -pub(crate) async fn execute_call(req: Request<'_>, call: CallData<'_>) -> CallOrSubscription { - let CallData { - methods, - max_response_body_size, - max_log_length, - conn_id, - id_provider, - sink, - request_start, - bounded_subscriptions, - } = call; - - rx_log_from_json(&req, call.max_log_length); - - let params = Params::new(req.params.as_ref().map(|params| params.get())); - let name = &req.method; - let id = req.id; - - let response = match methods.method_with_name(name) { - None => { - let response = MethodResponse::error(id, ErrorObject::from(ErrorCode::MethodNotFound)); - CallOrSubscription::Call(response) - } - Some((_name, method)) => match method { - MethodCallback::Sync(callback) => { - let response = (callback)(id, params, max_response_body_size as usize); - CallOrSubscription::Call(response) - 
} - MethodCallback::Async(callback) => { - let id = id.into_owned(); - let params = params.into_owned(); - let response = - (callback)(id, params, conn_id, max_response_body_size as usize).await; - CallOrSubscription::Call(response) - } - MethodCallback::AsyncWithDetails(_callback) => { - unimplemented!() - } - MethodCallback::Subscription(callback) => { - if let Some(p) = bounded_subscriptions.acquire() { - let conn_state = - SubscriptionState { conn_id, id_provider, subscription_permit: p }; - let response = callback(id, params, sink.clone(), conn_state).await; - CallOrSubscription::Subscription(response) - } else { - let response = MethodResponse::error( - id, - reject_too_many_subscriptions(bounded_subscriptions.max()), - ); - CallOrSubscription::Call(response) - } - } - MethodCallback::Unsubscription(callback) => { - // Don't adhere to any resource or subscription limits; always let unsubscribing - // happen! - let result = callback(id, params, conn_id, max_response_body_size as usize); - CallOrSubscription::Call(result) - } - }, - }; - - tx_log_from_str(response.as_response().as_result(), max_log_length); - let _ = request_start; - response + rpc_service: &S, +) -> MethodResponse +where + for<'b> S: RpcServiceT<'b> + Send, +{ + rpc_service.call(req).await } #[instrument(name = "notification", fields(method = notif.method.as_ref()), skip(notif, max_log_length), level = "TRACE")] @@ -192,31 +120,15 @@ fn execute_notification(notif: &Notif<'_>, max_log_length: u32) -> MethodRespons response } -#[allow(dead_code)] -pub(crate) struct HandleRequest { - pub(crate) methods: Methods, - pub(crate) max_request_body_size: u32, - pub(crate) max_response_body_size: u32, - pub(crate) max_log_length: u32, - pub(crate) batch_requests_supported: bool, - pub(crate) conn: Arc, - pub(crate) bounded_subscriptions: BoundedSubscriptions, - pub(crate) method_sink: MethodSink, - pub(crate) id_provider: Arc, -} - -pub(crate) async fn handle_request(request: String, input: 
HandleRequest) -> Option { - let HandleRequest { - methods, - max_response_body_size, - max_log_length, - conn, - bounded_subscriptions, - method_sink, - id_provider, - .. - } = input; - +pub(crate) async fn call_with_service( + request: String, + rpc_service: S, + max_response_body_size: usize, + conn: Arc, +) -> Option +where + for<'a> S: RpcServiceT<'a> + Send, +{ enum Kind { Single, Batch, @@ -231,31 +143,23 @@ pub(crate) async fn handle_request(request: String, input: HandleRequest) -> Opt }) .unwrap_or(Kind::Single); - let call = CallData { - conn_id: 0, - methods: &methods, - id_provider: &*id_provider, - sink: &method_sink, - max_response_body_size, - max_log_length, - request_start: Instant::now(), - bounded_subscriptions, - }; - // Single request or notification let res = if matches!(request_kind, Kind::Single) { - let response = process_single_request(request.into_bytes(), call).await; + let response = process_single_request(request.into_bytes(), &rpc_service).await; match response { - Some(CallOrSubscription::Call(response)) => Some(response.to_result()), - Some(CallOrSubscription::Subscription(_)) => { + Some(response) if response.is_method_call() => Some(response.to_result()), + _ => { // subscription responses are sent directly over the sink, return a response here // would lead to duplicate responses for the subscription response None } - None => None, } } else { - process_batch_request(Batch { data: request.into_bytes(), call }).await + process_batch_request( + Batch { data: request.into_bytes(), rpc_service }, + max_response_body_size, + ) + .await }; drop(conn); diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index e6d1a6051..7afb6bb7d 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -7,7 +7,10 @@ use crate::server::{ use futures::{FutureExt, Stream, StreamExt}; use jsonrpsee::{ core::TEN_MB_SIZE_BYTES, - server::{AlreadyStoppedError, IdProvider, RandomIntegerIdProvider}, + 
server::{ + middleware::rpc::{either::Either, RpcLoggerLayer, RpcServiceT}, + AlreadyStoppedError, IdProvider, RandomIntegerIdProvider, + }, BoundedSubscriptions, MethodSink, Methods, }; use std::{ @@ -21,36 +24,57 @@ use tokio::{ io::{AsyncRead, AsyncWrite}, sync::{oneshot, watch, OwnedSemaphorePermit}, }; -use tower::{layer::util::Identity, Service}; +use tower::{layer::util::Identity, Layer, Service}; use tracing::{debug, trace, warn}; // re-export so can be used during builder setup -use crate::server::connection::IpcConnDriver; +use crate::server::{ + connection::IpcConnDriver, + rpc_service::{RpcService, RpcServiceCfg}, +}; pub use parity_tokio_ipc::Endpoint; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; +use tower::layer::{util::Stack, LayerFn}; mod connection; mod future; mod ipc; +mod rpc_service; /// Ipc Server implementation // This is an adapted `jsonrpsee` Server, but for `Ipc` connections. -pub struct IpcServer { +pub struct IpcServer { /// The endpoint we listen for incoming transactions endpoint: Endpoint, id_provider: Arc, cfg: Settings, - service_builder: tower::ServiceBuilder, + rpc_middleware: RpcServiceBuilder, + http_middleware: tower::ServiceBuilder, } -impl IpcServer { +impl IpcServer { /// Returns the configured [Endpoint] pub fn endpoint(&self) -> &Endpoint { &self.endpoint } +} +impl IpcServer +where + RpcMiddleware: Layer + Clone + Send + 'static, + for<'a> >::Service: RpcServiceT<'a>, + HttpMiddleware: Layer> + Send + 'static, + >>::Service: Send + + Service< + String, + Response = Option, + Error = Box, + >, + <>>::Service as Service>::Future: + Send + Unpin, +{ /// Start responding to connections requests. 
/// /// This will run on the tokio runtime until the server is stopped or the ServerHandle is @@ -123,7 +147,7 @@ impl IpcServer { let incoming = match self.endpoint.incoming() { Ok(connections) => { #[cfg(windows)] - let connections = Box::pin(connections); + let connections = Box::pin(connections); Incoming::new(connections) } Err(err) => { @@ -154,7 +178,7 @@ impl IpcServer { let (tx, rx) = mpsc::channel::(message_buffer_capacity as usize); let method_sink = MethodSink::new_with_limit(tx, max_response_body_size); - let tower_service = TowerService { + let tower_service = TowerServiceNoHttp { inner: ServiceData { methods: methods.clone(), max_request_body_size, @@ -170,9 +194,10 @@ impl IpcServer { ), method_sink, }, + rpc_middleware: self.rpc_middleware.clone(), }; - let service = self.service_builder.service(tower_service); + let service = self.http_middleware.service(tower_service); connections.add(Box::pin(spawn_connection( ipc, service, @@ -244,16 +269,87 @@ pub(crate) struct ServiceData { pub(crate) method_sink: MethodSink, } +/// Similar to [`tower::ServiceBuilder`] but doesn't +/// support any tower middleware implementations. +#[derive(Debug, Clone)] +pub struct RpcServiceBuilder(tower::ServiceBuilder); + +impl Default for RpcServiceBuilder { + fn default() -> Self { + RpcServiceBuilder(tower::ServiceBuilder::new()) + } +} + +impl RpcServiceBuilder { + /// Create a new [`RpcServiceBuilder`]. + pub fn new() -> Self { + Self(tower::ServiceBuilder::new()) + } +} + +impl RpcServiceBuilder { + /// Optionally add a new layer `T` to the [`RpcServiceBuilder`]. + /// + /// See the documentation for [`tower::ServiceBuilder::option_layer`] for more details. + pub fn option_layer( + self, + layer: Option, + ) -> RpcServiceBuilder, L>> { + let layer = if let Some(layer) = layer { + Either::Left(layer) + } else { + Either::Right(Identity::new()) + }; + self.layer(layer) + } + + /// Add a new layer `T` to the [`RpcServiceBuilder`]. 
+ /// + /// See the documentation for [`tower::ServiceBuilder::layer`] for more details. + pub fn layer(self, layer: T) -> RpcServiceBuilder> { + RpcServiceBuilder(self.0.layer(layer)) + } + + /// Add a [`tower::Layer`] built from a function that accepts a service and returns another + /// service. + /// + /// See the documentation for [`tower::ServiceBuilder::layer_fn`] for more details. + pub fn layer_fn(self, f: F) -> RpcServiceBuilder, L>> { + RpcServiceBuilder(self.0.layer_fn(f)) + } + + /// Add a logging layer to [`RpcServiceBuilder`] + /// + /// This logs each request and response for every call. + pub fn rpc_logger(self, max_log_len: u32) -> RpcServiceBuilder> { + RpcServiceBuilder(self.0.layer(RpcLoggerLayer::new(max_log_len))) + } + + /// Wrap the service `S` with the middleware. + pub(crate) fn service(&self, service: S) -> L::Service + where + L: tower::Layer, + { + self.0.service(service) + } +} + /// JsonRPSee service compatible with `tower`. /// /// # Note /// This is similar to [`hyper::service::service_fn`](https://docs.rs/hyper/latest/hyper/service/fn.service_fn.html). -#[derive(Debug)] -pub struct TowerService { +#[derive(Debug, Clone)] +pub struct TowerServiceNoHttp { inner: ServiceData, + rpc_middleware: RpcServiceBuilder, } -impl Service for TowerService { +impl Service for TowerServiceNoHttp +where + RpcMiddleware: for<'a> Layer, + >::Service: Send + Sync + 'static, + for<'a> >::Service: RpcServiceT<'a>, +{ /// The response of a handled RPC call /// /// This is an `Option` because subscriptions and call responses are handled differently. 
@@ -273,26 +369,32 @@ impl Service for TowerService { fn call(&mut self, request: String) -> Self::Future { trace!("{:?}", request); - // handle the request - let data = ipc::HandleRequest { - methods: self.inner.methods.clone(), - max_request_body_size: self.inner.max_request_body_size, - max_response_body_size: self.inner.max_response_body_size, - max_log_length: self.inner.max_log_length, - batch_requests_supported: true, - conn: self.inner.conn.clone(), - bounded_subscriptions: self.inner.bounded_subscriptions.clone(), - method_sink: self.inner.method_sink.clone(), + let cfg = RpcServiceCfg::CallsAndSubscriptions { + bounded_subscriptions: BoundedSubscriptions::new( + self.inner.max_subscriptions_per_connection, + ), id_provider: self.inner.id_provider.clone(), + sink: self.inner.method_sink.clone(), }; + let max_response_body_size = self.inner.max_response_body_size as usize; + let rpc_service = self.rpc_middleware.service(RpcService::new( + self.inner.methods.clone(), + max_response_body_size, + self.inner.conn_id as usize, + cfg, + )); + let conn = self.inner.conn.clone(); // an ipc connection needs to handle read+write concurrently // even if the underlying rpc handler spawns the actual work or is does a lot of async any // additional overhead performed by `handle_request` can result in I/O latencies, for // example tracing calls are relatively CPU expensive on serde::serialize alone, moving this // work to a separate task takes the pressure off the connection so all concurrent responses // are also serialized concurrently and the connection can focus on read+write - let f = tokio::task::spawn(async move { ipc::handle_request(request, data).await }); + let f = tokio::task::spawn(async move { + ipc::call_with_service(request, rpc_service, max_response_body_size, conn).await + }); + Box::pin(async move { f.await.map_err(|err| err.into()) }) } } @@ -413,24 +515,26 @@ impl Default for Settings { /// Builder to configure and create a JSON-RPC server 
#[derive(Debug)] -pub struct Builder { +pub struct Builder { settings: Settings, /// Subscription ID provider. id_provider: Arc, - service_builder: tower::ServiceBuilder, + rpc_middleware: RpcServiceBuilder, + http_middleware: tower::ServiceBuilder, } -impl Default for Builder { +impl Default for Builder { fn default() -> Self { Builder { settings: Settings::default(), id_provider: Arc::new(RandomIntegerIdProvider), - service_builder: tower::ServiceBuilder::new(), + rpc_middleware: RpcServiceBuilder::new(), + http_middleware: tower::ServiceBuilder::new(), } } } -impl Builder { +impl Builder { /// Set the maximum size of a request body in bytes. Default is 10 MiB. pub fn max_request_body_size(mut self, size: u32) -> Self { self.settings.max_request_body_size = size; @@ -529,26 +633,114 @@ impl Builder { /// let builder = tower::ServiceBuilder::new(); /// /// let server = - /// reth_ipc::server::Builder::default().set_middleware(builder).build("/tmp/my-uds"); + /// reth_ipc::server::Builder::default().set_http_middleware(builder).build("/tmp/my-uds"); + /// } + /// ``` + pub fn set_http_middleware( + self, + service_builder: tower::ServiceBuilder, + ) -> Builder { + Builder { + settings: self.settings, + id_provider: self.id_provider, + http_middleware: service_builder, + rpc_middleware: self.rpc_middleware, + } + } + + /// Enable middleware that is invoked on every JSON-RPC call. + /// + /// The middleware itself is very similar to the `tower middleware` but + /// it has a different service trait which takes &self instead &mut self + /// which means that you can't use built-in middleware from tower. + /// + /// Another consequence of `&self` is that you must wrap any of the middleware state in + /// a type which is Send and provides interior mutability such `Arc`. + /// + /// The builder itself exposes a similar API as the [`tower::ServiceBuilder`] + /// where it is possible to compose layers to the middleware. 
+ /// + /// ``` + /// use std::{ + /// net::SocketAddr, + /// sync::{ + /// atomic::{AtomicUsize, Ordering}, + /// Arc, + /// }, + /// time::Instant, + /// }; + /// + /// use futures_util::future::BoxFuture; + /// use jsonrpsee::{ + /// server::{middleware::rpc::RpcServiceT, ServerBuilder}, + /// types::Request, + /// MethodResponse, + /// }; + /// use reth_ipc::server::{Builder, RpcServiceBuilder}; + /// + /// #[derive(Clone)] + /// struct MyMiddleware { + /// service: S, + /// count: Arc, + /// } + /// + /// impl<'a, S> RpcServiceT<'a> for MyMiddleware + /// where + /// S: RpcServiceT<'a> + Send + Sync + Clone + 'static, + /// { + /// type Future = BoxFuture<'a, MethodResponse>; + /// + /// fn call(&self, req: Request<'a>) -> Self::Future { + /// tracing::info!("MyMiddleware processed call {}", req.method); + /// let count = self.count.clone(); + /// let service = self.service.clone(); + /// + /// Box::pin(async move { + /// let rp = service.call(req).await; + /// // Modify the state. + /// count.fetch_add(1, Ordering::Relaxed); + /// rp + /// }) + /// } /// } + /// + /// // Create a state per connection + /// // NOTE: The service type can be omitted once `start` is called on the server. + /// let m = RpcServiceBuilder::new().layer_fn(move |service: ()| MyMiddleware { + /// service, + /// count: Arc::new(AtomicUsize::new(0)), + /// }); + /// let builder = Builder::default().set_rpc_middleware(m); /// ``` - pub fn set_middleware(self, service_builder: tower::ServiceBuilder) -> Builder { - Builder { settings: self.settings, id_provider: self.id_provider, service_builder } + pub fn set_rpc_middleware( + self, + rpc_middleware: RpcServiceBuilder, + ) -> Builder { + Builder { + settings: self.settings, + id_provider: self.id_provider, + rpc_middleware, + http_middleware: self.http_middleware, + } } /// Finalize the configuration of the server. Consumes the [`Builder`]. 
- pub fn build(self, endpoint: impl AsRef) -> IpcServer { + pub fn build(self, endpoint: impl AsRef) -> IpcServer { let endpoint = Endpoint::new(endpoint.as_ref().to_string()); self.build_with_endpoint(endpoint) } /// Finalize the configuration of the server. Consumes the [`Builder`]. - pub fn build_with_endpoint(self, endpoint: Endpoint) -> IpcServer { + pub fn build_with_endpoint( + self, + endpoint: Endpoint, + ) -> IpcServer { IpcServer { endpoint, cfg: self.settings, id_provider: self.id_provider, - service_builder: self.service_builder, + http_middleware: self.http_middleware, + rpc_middleware: self.rpc_middleware, } } } @@ -589,7 +781,9 @@ mod tests { use futures::future::{select, Either}; use jsonrpsee::{ core::client::{ClientT, Subscription, SubscriptionClientT}, - rpc_params, PendingSubscriptionSink, RpcModule, SubscriptionMessage, + rpc_params, + types::Request, + PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; use parity_tokio_ipc::dummy_endpoint; use tokio::sync::broadcast; @@ -657,6 +851,7 @@ mod tests { #[tokio::test] async fn test_ipc_modules() { + reth_tracing::init_test_tracing(); let endpoint = dummy_endpoint(); let server = Builder::default().build(&endpoint); let mut module = RpcModule::new(()); @@ -703,4 +898,50 @@ mod tests { let items = sub.take(16).collect::>().await; assert_eq!(items.len(), 16); } + + #[tokio::test] + async fn test_rpc_middleware() { + #[derive(Clone)] + struct ModifyRequestIf(S); + + impl<'a, S> RpcServiceT<'a> for ModifyRequestIf + where + S: Send + Sync + RpcServiceT<'a>, + { + type Future = S::Future; + + fn call(&self, mut req: Request<'a>) -> Self::Future { + // Re-direct all calls that isn't `say_hello` to `say_goodbye` + if req.method == "say_hello" { + req.method = "say_goodbye".into(); + } else if req.method == "say_goodbye" { + req.method = "say_hello".into(); + } + + self.0.call(req) + } + } + + reth_tracing::init_test_tracing(); + let endpoint = dummy_endpoint(); + + let rpc_middleware = 
RpcServiceBuilder::new().layer_fn(ModifyRequestIf); + let server = Builder::default().set_rpc_middleware(rpc_middleware).build(&endpoint); + + let mut module = RpcModule::new(()); + let goodbye_msg = r#"{"jsonrpc":"2.0","id":1,"result":"goodbye"}"#; + let hello_msg = r#"{"jsonrpc":"2.0","id":2,"result":"hello"}"#; + module.register_method("say_hello", move |_, _| hello_msg).unwrap(); + module.register_method("say_goodbye", move |_, _| goodbye_msg).unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let say_hello_response: String = client.request("say_hello", rpc_params![]).await.unwrap(); + let say_goodbye_response: String = + client.request("say_goodbye", rpc_params![]).await.unwrap(); + + assert_eq!(say_hello_response, goodbye_msg); + assert_eq!(say_goodbye_response, hello_msg); + } } diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs new file mode 100644 index 000000000..94e9ed2aa --- /dev/null +++ b/crates/rpc/ipc/src/server/rpc_service.rs @@ -0,0 +1,138 @@ +//! JSON-RPC service middleware. +use futures_util::future::BoxFuture; +use jsonrpsee::{ + server::{ + middleware::rpc::{ResponseFuture, RpcServiceT}, + IdProvider, + }, + types::{error::reject_too_many_subscriptions, ErrorCode, ErrorObject, Request}, + BoundedSubscriptions, ConnectionDetails, MethodCallback, MethodResponse, MethodSink, Methods, + SubscriptionState, +}; +use std::sync::Arc; + +/// JSON-RPC service middleware. +#[derive(Clone, Debug)] +pub struct RpcService { + conn_id: usize, + methods: Methods, + max_response_body_size: usize, + cfg: RpcServiceCfg, +} + +/// Configuration of the RpcService. +#[allow(dead_code)] +#[derive(Clone, Debug)] +pub(crate) enum RpcServiceCfg { + /// The server supports only calls. + OnlyCalls, + /// The server supports both method calls and subscriptions. 
+ CallsAndSubscriptions { + bounded_subscriptions: BoundedSubscriptions, + sink: MethodSink, + id_provider: Arc, + }, +} + +impl RpcService { + /// Create a new service. + pub(crate) fn new( + methods: Methods, + max_response_body_size: usize, + conn_id: usize, + cfg: RpcServiceCfg, + ) -> Self { + Self { methods, max_response_body_size, conn_id, cfg } + } +} + +impl<'a> RpcServiceT<'a> for RpcService { + // The rpc module is already boxing the futures and + // it's used to under the hood by the RpcService. + type Future = ResponseFuture>; + + fn call(&self, req: Request<'a>) -> Self::Future { + let conn_id = self.conn_id; + let max_response_body_size = self.max_response_body_size; + + let params = req.params(); + let name = req.method_name(); + let id = req.id().clone(); + + match self.methods.method_with_name(name) { + None => { + let rp = MethodResponse::error(id, ErrorObject::from(ErrorCode::MethodNotFound)); + ResponseFuture::ready(rp) + } + Some((_name, method)) => match method { + MethodCallback::Async(callback) => { + let params = params.into_owned(); + let id = id.into_owned(); + + let fut = (callback)(id, params, conn_id, max_response_body_size); + ResponseFuture::future(fut) + } + MethodCallback::AsyncWithDetails(callback) => { + let params = params.into_owned(); + let id = id.into_owned(); + + // Note: Add the `Request::extensions` to the connection details when available + // here. 
+ let fut = (callback)( + id, + params, + ConnectionDetails::_new(conn_id), + max_response_body_size, + ); + ResponseFuture::future(fut) + } + MethodCallback::Sync(callback) => { + let rp = (callback)(id, params, max_response_body_size); + ResponseFuture::ready(rp) + } + MethodCallback::Subscription(callback) => { + let RpcServiceCfg::CallsAndSubscriptions { + bounded_subscriptions, + sink, + id_provider, + } = self.cfg.clone() + else { + tracing::warn!("Subscriptions not supported"); + let rp = + MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); + return ResponseFuture::ready(rp); + }; + + if let Some(p) = bounded_subscriptions.acquire() { + let conn_state = SubscriptionState { + conn_id, + id_provider: &*id_provider.clone(), + subscription_permit: p, + }; + + let fut = callback(id.clone(), params, sink, conn_state); + ResponseFuture::future(fut) + } else { + let max = bounded_subscriptions.max(); + let rp = MethodResponse::error(id, reject_too_many_subscriptions(max)); + ResponseFuture::ready(rp) + } + } + MethodCallback::Unsubscription(callback) => { + // Don't adhere to any resource or subscription limits; always let unsubscribing + // happen! + + let RpcServiceCfg::CallsAndSubscriptions { .. } = self.cfg else { + tracing::warn!("Subscriptions not supported"); + let rp = + MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); + return ResponseFuture::ready(rp); + }; + + let rp = callback(id, params, conn_id, max_response_body_size); + ResponseFuture::ready(rp) + } + }, + } + } +} diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 2349c6e85..cd21be271 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -161,7 +161,7 @@ pub struct AuthServerConfig { /// Configs for JSON-RPC Http. 
pub(crate) server_config: ServerBuilder, /// Configs for IPC server - pub(crate) ipc_server_config: Option, + pub(crate) ipc_server_config: Option>, /// IPC endpoint pub(crate) ipc_endpoint: Option, } @@ -223,7 +223,7 @@ pub struct AuthServerConfigBuilder { socket_addr: Option, secret: JwtSecret, server_config: Option>, - ipc_server_config: Option, + ipc_server_config: Option>, ipc_endpoint: Option, } @@ -289,7 +289,7 @@ impl AuthServerConfigBuilder { /// Configures the IPC server /// /// Note: this always configures an [EthSubscriptionIdProvider] - pub fn with_ipc_config(mut self, config: IpcServerBuilder) -> Self { + pub fn with_ipc_config(mut self, config: IpcServerBuilder) -> Self { self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ef5b8868c..62f82b8f8 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -171,7 +171,9 @@ use jsonrpsee::{ use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_ipc::server::IpcServer; -pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; +pub use reth_ipc::server::{ + Builder as IpcServerBuilder, Endpoint, RpcServiceBuilder as IpcRpcServiceBuilder, +}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_provider::{ AccountReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, @@ -1472,7 +1474,7 @@ pub struct RpcServerConfig { /// Address where to bind the ws server to ws_addr: Option, /// Configs for JSON-RPC IPC server - ipc_server_config: Option, + ipc_server_config: Option>, /// The Endpoint where to launch the ipc server ipc_endpoint: Option, /// JWT secret for authentication @@ -1508,7 +1510,7 @@ impl RpcServerConfig { } /// Creates a new config with only ipc set - pub fn ipc(config: IpcServerBuilder) -> Self { + pub fn ipc(config: IpcServerBuilder) -> Self { 
Self::default().with_ipc(config) } @@ -1568,7 +1570,7 @@ impl RpcServerConfig { /// /// Note: this always configures an [EthSubscriptionIdProvider] [IdProvider] for convenience. /// To set a custom [IdProvider], please use [Self::with_id_provider]. - pub fn with_ipc(mut self, config: IpcServerBuilder) -> Self { + pub fn with_ipc(mut self, config: IpcServerBuilder) -> Self { self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self } @@ -1756,13 +1758,12 @@ impl RpcServerConfig { server.ws_http = self.build_ws_http(modules).await?; if let Some(builder) = self.ipc_server_config { - // let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::new).unwrap_or_default(); + let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); let ipc_path = self .ipc_endpoint .unwrap_or_else(|| Endpoint::new(DEFAULT_IPC_ENDPOINT.to_string())); let ipc = builder - // TODO(mattsse): add metrics middleware for IPC - // .set_middleware(metrics) + .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) .build(ipc_path.path()); server.ipc = Some(ipc); } @@ -2127,7 +2128,7 @@ pub struct RpcServer { /// Configured ws,http servers ws_http: WsHttpServer, /// ipc server - ipc: Option, + ipc: Option>>, } // === impl RpcServer === From ce1e401d217215dfdbe6c9de6b87efadf9f804e6 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 23 Apr 2024 17:40:18 +0100 Subject: [PATCH 014/250] docs: add warning regarding `increment_block` necessity on `write_to_storage` (#7816) --- .../src/bundle_state/bundle_state_with_receipts.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 5e595532c..1153464f7 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ 
b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -316,7 +316,12 @@ impl BundleStateWithReceipts { let mut bodies_cursor = tx.cursor_read::()?; let mut receipts_cursor = tx.cursor_write::()?; - for (idx, receipts) in self.receipts.into_iter().enumerate() { + // ATTENTION: Any potential future refactor or change to how this loop works should keep in + // mind that the static file producer must always call `increment_block` even if the block + // has no receipts. Keeping track of the exact block range of the segment is needed for + // consistency, querying and file range segmentation. + let blocks = self.receipts.into_iter().enumerate(); + for (idx, receipts) in blocks { let block_number = self.first_block + idx as u64; let first_tx_index = bodies_cursor .seek_exact(block_number)? From a8a434d2c4869c8efb520e6b22eacc9b4d3ae439 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 23 Apr 2024 21:06:12 +0200 Subject: [PATCH 015/250] chore: flatten more deps (#7824) --- Cargo.lock | 3 +-- crates/rpc/rpc-engine-api/Cargo.toml | 7 ++++--- crates/rpc/rpc-engine-api/src/engine_api.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e34b6c61..9a183217c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7375,10 +7375,9 @@ dependencies = [ "metrics", "reth-beacon-consensus", "reth-engine-primitives", + "reth-ethereum-engine-primitives", "reth-interfaces", "reth-metrics", - "reth-node-ethereum", - "reth-node-optimism", "reth-payload-builder", "reth-primitives", "reth-provider", diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index b7e6eeccb..5fe782a6e 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -39,12 +39,13 @@ tracing.workspace = true serde.workspace = true [dev-dependencies] -alloy-rlp.workspace = true -reth-node-ethereum.workspace = true -reth-node-optimism.workspace = true 
+reth-ethereum-engine-primitives.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } + +alloy-rlp.workspace = true + assert_matches.workspace = true [features] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 01db0bb11..be9f98832 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -712,8 +712,8 @@ mod tests { use super::*; use assert_matches::assert_matches; use reth_beacon_consensus::BeaconEngineMessage; + use reth_ethereum_engine_primitives::EthEngineTypes; use reth_interfaces::test_utils::generators::random_block; - use reth_node_ethereum::EthEngineTypes; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{SealedBlock, B256, MAINNET}; use reth_provider::test_utils::MockEthProvider; From c659e28aa0e3ea5e83341b8a9eead0f0c89c296a Mon Sep 17 00:00:00 2001 From: Kyrylo Riabov Date: Tue, 23 Apr 2024 22:10:45 +0300 Subject: [PATCH 016/250] feat(storage): replace Tree generic with Arc (#7810) Co-authored-by: Matthias Seitz --- .../src/commands/debug_cmd/build_block.rs | 2 +- .../src/commands/debug_cmd/replay_engine.rs | 5 +- .../consensus/beacon/src/engine/test_utils.rs | 12 +-- crates/e2e-test-utils/src/lib.rs | 8 +- crates/node-builder/src/builder.rs | 67 ++++++------- crates/storage/provider/src/providers/mod.rs | 94 ++++++++----------- crates/storage/provider/src/traits/mod.rs | 3 + .../provider/src/traits/tree_viewer.rs | 22 +++++ examples/rpc-db/src/main.rs | 2 +- 9 files changed, 102 insertions(+), 113 deletions(-) create mode 100644 crates/storage/provider/src/traits/tree_viewer.rs diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 7aaef00fa..aee51ee79 100644 --- 
a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -174,7 +174,7 @@ impl Command { EvmProcessorFactory::new(self.chain.clone(), evm_config), ); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; - let blockchain_tree = ShareableBlockchainTree::new(tree); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // fetch the best block from the database let best_block = diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 841b9e3c6..d9b6e9865 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -136,11 +136,10 @@ impl Command { EvmProcessorFactory::new(self.chain.clone(), evm_config), ); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; - let blockchain_tree = ShareableBlockchainTree::new(tree); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // Set up the blockchain provider - let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone(), blockchain_tree)?; // Set up network let network_secret_path = diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index ff36e871b..67225b7c7 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -43,13 +43,7 @@ type DatabaseEnv = TempDatabase; type TestBeaconConsensusEngine = BeaconConsensusEngine< Arc, - BlockchainProvider< - Arc, - ShareableBlockchainTree< - Arc, - EitherExecutorFactory>, - >, - >, + BlockchainProvider>, Arc>, EthEngineTypes, >; @@ -423,9 +417,9 @@ where // Setup blockchain tree let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); let config = 
BlockchainTreeConfig::new(1, 2, 3, 2); - let tree = ShareableBlockchainTree::new( + let tree = Arc::new(ShareableBlockchainTree::new( BlockchainTree::new(externals, config, None).expect("failed to create tree"), - ); + )); let latest = self.base_config.chain_spec.genesis_header().seal_slow(); let blockchain_provider = BlockchainProvider::with_latest(provider_factory.clone(), tree, latest); diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 043d1e0c6..3c34f76e5 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,15 +1,13 @@ use node::NodeHelper; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - blockchain_tree::ShareableBlockchainTree, builder::{NodeBuilder, NodeConfig, NodeHandle}, - revm::EvmProcessorFactory, tasks::TaskManager, }; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ components::{NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - FullNodeComponentsAdapter, FullNodeTypesAdapter, NodeTypes, + FullNodeComponentsAdapter, FullNodeTypesAdapter, }; use reth_primitives::ChainSpec; use reth_provider::providers::BlockchainProvider; @@ -100,12 +98,10 @@ where // Type aliases type TmpDB = Arc>; -type EvmType = EvmProcessorFactory<::Evm>; -type RethProvider = BlockchainProvider>>; type TmpPool = <>>::PoolBuilder as PoolBuilder< TmpNodeAdapter, >>::Pool; -type TmpNodeAdapter = FullNodeTypesAdapter>; +type TmpNodeAdapter = FullNodeTypesAdapter>; /// Type alias for a type of NodeHelper pub type NodeHelperType = NodeHelper, TmpPool>>; diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 3d7d3a044..327d906b3 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -63,11 +63,9 @@ use tokio::sync::{mpsc::unbounded_channel, oneshot}; /// The builtin provider type of the reth node. 
// Note: we need to hardcode this because custom components might depend on it in associated types. -type RethFullProviderType = - BlockchainProvider>>; +type RethFullProviderType = BlockchainProvider; -type RethFullAdapter = - FullNodeTypesAdapter::Evm>>; +type RethFullAdapter = FullNodeTypesAdapter>; #[cfg_attr(doc, aquamarine::aquamarine)] /// Declaratively construct a node. @@ -278,7 +276,7 @@ where >, > where - N: Node::Evm>>>, + N: Node>>, N::PoolBuilder: PoolBuilder>, N::NetworkBuilder: crate::components::NetworkBuilder< RethFullAdapter, @@ -308,15 +306,14 @@ where Types, Components, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, > where - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, + Components: + NodeComponentsBuilder>>, { NodeBuilder { config: self.config, @@ -339,7 +336,7 @@ impl Types, Components, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -347,9 +344,7 @@ impl where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, Types: NodeTypes, - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, + Components: NodeComponentsBuilder>>, { /// Apply a function to the components builder. 
pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { @@ -371,7 +366,7 @@ where where F: Fn( FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, ) -> eyre::Result<()> @@ -388,7 +383,7 @@ where F: Fn( FullNode< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -407,7 +402,7 @@ where RpcContext< '_, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -427,7 +422,7 @@ where RpcContext< '_, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -449,7 +444,7 @@ where F: Fn( ExExContext< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -476,7 +471,7 @@ where ) -> eyre::Result< NodeHandle< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -556,7 +551,7 @@ where .with_sync_metrics_tx(sync_metrics_tx.clone()); let canon_state_notification_sender = tree.canon_state_notification_sender(); - let blockchain_tree = ShareableBlockchainTree::new(tree); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); debug!(target: "reth::cli", "configured blockchain tree"); // fetch the head block from the database @@ -995,7 +990,7 @@ where >, > where - N: Node::Evm>>>, + N: Node>>, N::PoolBuilder: PoolBuilder>, N::NetworkBuilder: crate::components::NetworkBuilder< RethFullAdapter, @@ -1032,7 +1027,7 @@ where >, > where - N: Node::Evm>>>, + N: Node>>, N::PoolBuilder: PoolBuilder>, N::NetworkBuilder: crate::components::NetworkBuilder< RethFullAdapter, @@ -1065,15 +1060,14 @@ where Types, Components, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, > where - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, + Components: + NodeComponentsBuilder>>, { WithLaunchContext { builder: 
self.builder.with_components(components_builder), @@ -1090,7 +1084,7 @@ impl Types, Components, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1098,9 +1092,7 @@ impl where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, Types: NodeTypes, - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, + Components: NodeComponentsBuilder>>, { /// Apply a function to the components builder. pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { @@ -1116,7 +1108,7 @@ where where F: Fn( FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, ) -> eyre::Result<()> @@ -1133,7 +1125,7 @@ where F: Fn( FullNode< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1152,7 +1144,7 @@ where RpcContext< '_, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1172,7 +1164,7 @@ where RpcContext< '_, FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1190,7 +1182,7 @@ where F: Fn( ExExContext< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1210,7 +1202,7 @@ where ) -> eyre::Result< NodeHandle< FullNodeComponentsAdapter< - FullNodeTypesAdapter>, + FullNodeTypesAdapter>, Components::Pool, >, >, @@ -1391,13 +1383,12 @@ impl std::fmt::Debug for BuilderContext { pub struct InitState; /// The state after all types of the node have been configured. -#[derive(Debug)] pub struct TypesState where DB: Database + Clone + 'static, Types: NodeTypes, { - adapter: FullNodeTypesAdapter>, + adapter: FullNodeTypesAdapter>, } /// The state of the node builder process after the node's components have been configured. 
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index b3011a9b0..c9ebd042c 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -4,7 +4,8 @@ use crate::{ CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, - StateProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + StateProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, + WithdrawalsProvider, }; use reth_db::{ database::Database, @@ -67,31 +68,36 @@ use reth_rpc_types::engine::ForkchoiceState; /// This type serves as the main entry point for interacting with the blockchain and provides data /// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper /// type that holds an instance of the database and the blockchain tree. -#[derive(Clone, Debug)] -pub struct BlockchainProvider { +#[derive(Clone)] +#[allow(missing_debug_implementations)] +pub struct BlockchainProvider { /// Provider type used to access the database. database: ProviderFactory, /// The blockchain tree instance. - tree: Tree, + tree: Arc, /// Tracks the chain info wrt forkchoice updates chain_info: ChainInfoTracker, } -impl BlockchainProvider { +impl BlockchainProvider { /// Create new provider instance that wraps the database and the blockchain tree, using the /// provided latest header to initialize the chain info tracker. 
- pub fn with_latest(database: ProviderFactory, tree: Tree, latest: SealedHeader) -> Self { + pub fn with_latest( + database: ProviderFactory, + tree: Arc, + latest: SealedHeader, + ) -> Self { Self { database, tree, chain_info: ChainInfoTracker::new(latest) } } } -impl BlockchainProvider +impl BlockchainProvider where DB: Database, { /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory, tree: Tree) -> ProviderResult { + pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { let provider = database.provider()?; let best: ChainInfo = provider.chain_info()?; match provider.header_by_number(best.best_number)? { @@ -104,10 +110,9 @@ where } } -impl BlockchainProvider +impl BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer, { /// Ensures that the given block number is canonical (synced) /// @@ -128,7 +133,7 @@ where } } -impl DatabaseProviderFactory for BlockchainProvider +impl DatabaseProviderFactory for BlockchainProvider where DB: Database, { @@ -137,10 +142,9 @@ where } } -impl HeaderProvider for BlockchainProvider +impl HeaderProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.header(block_hash) @@ -182,10 +186,9 @@ where } } -impl BlockHashReader for BlockchainProvider +impl BlockHashReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn block_hash(&self, number: u64) -> ProviderResult> { self.database.block_hash(number) @@ -200,10 +203,9 @@ where } } -impl BlockNumReader for BlockchainProvider +impl BlockNumReader for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn chain_info(&self) -> ProviderResult { Ok(self.chain_info.chain_info()) @@ -222,10 +224,9 @@ where } } -impl BlockIdReader for BlockchainProvider +impl BlockIdReader for BlockchainProvider 
where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn pending_block_num_hash(&self) -> ProviderResult> { Ok(self.tree.pending_block_num_hash()) @@ -240,10 +241,9 @@ where } } -impl BlockReader for BlockchainProvider +impl BlockReader for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { let block = match source { @@ -320,10 +320,9 @@ where } } -impl TransactionsProvider for BlockchainProvider +impl TransactionsProvider for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.database.transaction_id(tx_hash) @@ -388,10 +387,9 @@ where } } -impl ReceiptProvider for BlockchainProvider +impl ReceiptProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.database.receipt(id) @@ -412,10 +410,10 @@ where self.database.receipts_by_tx_range(range) } } -impl ReceiptProviderIdExt for BlockchainProvider + +impl ReceiptProviderIdExt for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { @@ -440,10 +438,9 @@ where } } -impl WithdrawalsProvider for BlockchainProvider +impl WithdrawalsProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn withdrawals_by_block( &self, @@ -458,10 +455,9 @@ where } } -impl StageCheckpointReader for BlockchainProvider +impl StageCheckpointReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) @@ -472,10 +468,9 @@ where } } -impl EvmEnvProvider for BlockchainProvider +impl EvmEnvProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn fill_env_at( &self, @@ 
-544,10 +539,9 @@ where } } -impl PruneCheckpointReader for BlockchainProvider +impl PruneCheckpointReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn get_prune_checkpoint( &self, @@ -557,20 +551,18 @@ where } } -impl ChainSpecProvider for BlockchainProvider +impl ChainSpecProvider for BlockchainProvider where DB: Send + Sync, - Tree: Send + Sync, { fn chain_spec(&self) -> Arc { self.database.chain_spec() } } -impl StateProviderFactory for BlockchainProvider +impl StateProviderFactory for BlockchainProvider where DB: Database, - Tree: BlockchainTreePendingStateProvider + BlockchainTreeViewer, { /// Storage provider for latest block fn latest(&self) -> ProviderResult { @@ -644,10 +636,9 @@ where } } -impl BlockchainTreeEngine for BlockchainProvider +impl BlockchainTreeEngine for BlockchainProvider where DB: Send + Sync, - Tree: BlockchainTreeEngine, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { self.tree.buffer_block(block) @@ -681,10 +672,9 @@ where } } -impl BlockchainTreeViewer for BlockchainProvider +impl BlockchainTreeViewer for BlockchainProvider where DB: Send + Sync, - Tree: BlockchainTreeViewer, { fn blocks(&self) -> BTreeMap> { self.tree.blocks() @@ -743,10 +733,9 @@ where } } -impl CanonChainTracker for BlockchainProvider +impl CanonChainTracker for BlockchainProvider where DB: Send + Sync, - Tree: Send + Sync, Self: BlockReader, { fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { @@ -779,10 +768,9 @@ where } } -impl BlockReaderIdExt for BlockchainProvider +impl BlockReaderIdExt for BlockchainProvider where Self: BlockReader + BlockIdReader + ReceiptProviderIdExt, - Tree: BlockchainTreeEngine, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { @@ -859,10 +847,9 @@ where } } -impl BlockchainTreePendingStateProvider for BlockchainProvider +impl BlockchainTreePendingStateProvider for BlockchainProvider where DB: Send + Sync, - Tree: 
BlockchainTreePendingStateProvider, { fn find_pending_state_provider( &self, @@ -872,20 +859,18 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider +impl CanonStateSubscriptions for BlockchainProvider where DB: Send + Sync, - Tree: CanonStateSubscriptions, { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.tree.subscribe_to_canonical_state() } } -impl ChangeSetReader for BlockchainProvider +impl ChangeSetReader for BlockchainProvider where DB: Database, - Tree: Sync + Send, { fn account_block_changeset( &self, @@ -895,10 +880,9 @@ where } } -impl AccountReader for BlockchainProvider +impl AccountReader for BlockchainProvider where DB: Database + Sync + Send, - Tree: Sync + Send, { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index c9623cb0c..44884acb0 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -80,3 +80,6 @@ pub use stats::StatsReader; mod full; pub use full::FullProvider; + +mod tree_viewer; +pub use tree_viewer::TreeViewer; diff --git a/crates/storage/provider/src/traits/tree_viewer.rs b/crates/storage/provider/src/traits/tree_viewer.rs new file mode 100644 index 000000000..db3b19c4d --- /dev/null +++ b/crates/storage/provider/src/traits/tree_viewer.rs @@ -0,0 +1,22 @@ +use crate::{BlockchainTreePendingStateProvider, CanonStateSubscriptions}; + +use reth_interfaces::blockchain_tree::{BlockchainTreeEngine, BlockchainTreeViewer}; + +/// Helper trait to combine all the traits we need for the BlockchainProvider +/// +/// This is a temporary solution +pub trait TreeViewer: + BlockchainTreeViewer + + BlockchainTreePendingStateProvider + + CanonStateSubscriptions + + BlockchainTreeEngine +{ +} + +impl TreeViewer for T where + T: BlockchainTreeViewer + + BlockchainTreePendingStateProvider + + CanonStateSubscriptions + + 
BlockchainTreeEngine +{ +} diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index b8286e51b..627da093c 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -49,7 +49,7 @@ async fn main() -> eyre::Result<()> { // 2. Setup the blockchain provider using only the database provider and a noop for the tree to // satisfy trait bounds. Tree is not used in this example since we are only operating on the // disk and don't handle new blocks/live sync etc, which is done by the blockchain tree. - let provider = BlockchainProvider::new(factory, NoopBlockchainTree::default())?; + let provider = BlockchainProvider::new(factory, Arc::new(NoopBlockchainTree::default()))?; let rpc_builder = RpcModuleBuilder::default() .with_provider(provider.clone()) From cfeead75985ebfc1e5cb5fefe4ad51f70cf2d6d2 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Tue, 23 Apr 2024 16:55:09 -0400 Subject: [PATCH 017/250] add `reth-evm-optimism` (#7821) --- Cargo.lock | 14 + Cargo.toml | 4 +- crates/optimism/evm/Cargo.toml | 37 ++ crates/optimism/evm/src/execute.rs | 744 ++++++++++++++++++++++++++++ crates/optimism/evm/src/lib.rs | 107 ++++ crates/optimism/node/Cargo.toml | 6 +- crates/optimism/node/src/evm/mod.rs | 94 ---- crates/optimism/node/src/lib.rs | 7 +- crates/optimism/node/src/node.rs | 3 +- crates/rpc/rpc/Cargo.toml | 2 +- 10 files changed, 914 insertions(+), 104 deletions(-) create mode 100644 crates/optimism/evm/Cargo.toml create mode 100644 crates/optimism/evm/src/execute.rs create mode 100644 crates/optimism/evm/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 9a183217c..9c450fe70 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6642,6 +6642,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-evm-optimism" +version = "0.2.0-beta.6" +dependencies = [ + "reth-evm", + "reth-interfaces", + "reth-primitives", + "reth-provider", + "reth-revm", + "revm-primitives", + "tracing", +] + 
[[package]] name = "reth-exex" version = "0.2.0-beta.6" @@ -7049,6 +7062,7 @@ dependencies = [ "reth-db", "reth-e2e-test-utils", "reth-evm", + "reth-evm-optimism", "reth-interfaces", "reth-network", "reth-node-api", diff --git a/Cargo.toml b/Cargo.toml index 1fb403e1b..7ef645f39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ members = [ "crates/node-ethereum/", "crates/node-builder/", "crates/optimism/node/", + "crates/optimism/evm/", "crates/node-core/", "crates/node/api/", "crates/stages/", @@ -85,7 +86,7 @@ members = [ "examples/custom-inspector/", "examples/exex/minimal/", "examples/exex/op-bridge/", - "testing/ef-tests/" + "testing/ef-tests/", ] default-members = ["bin/reth"] @@ -220,6 +221,7 @@ reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } reth-node-optimism = { path = "crates/optimism/node" } +reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = "crates/node-core" } reth-node-api = { path = "crates/node/api" } reth-downloaders = { path = "crates/net/downloaders" } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml new file mode 100644 index 000000000..fbffa1245 --- /dev/null +++ b/crates/optimism/evm/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "reth-evm-optimism" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# Reth +reth-evm.workspace = true +reth-primitives.workspace = true +reth-revm.workspace = true +reth-interfaces.workspace = true +reth-provider.workspace = true + +# Optimism +revm-primitives.workspace = true + +# misc +tracing.workspace = true + +[dev-dependencies] +reth-revm = { workspace = true, features = ["test-utils"] } + +[features] +optimism = [ + "reth-primitives/optimism", 
+ "reth-provider/optimism", + "reth-revm/optimism", + "reth-interfaces/optimism", + "revm-primitives/optimism", +] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs new file mode 100644 index 000000000..ef87cce1d --- /dev/null +++ b/crates/optimism/evm/src/execute.rs @@ -0,0 +1,744 @@ +//! Optimism block executor. + +use crate::OptimismEvmConfig; +use reth_evm::{ + execute::{ + BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, + ExecutorProvider, + }, + ConfigureEvm, ConfigureEvmEnv, +}; +use reth_interfaces::{ + executor::{BlockExecutionError, BlockValidationError, OptimismBlockExecutionError}, + provider::ProviderError, +}; +use reth_primitives::{ + proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, Bytes, ChainSpec, + GotExpected, Hardfork, Header, PruneModes, Receipt, ReceiptWithBloom, Receipts, TxType, + Withdrawals, B256, U256, +}; +use reth_provider::BundleStateWithReceipts; +use reth_revm::{ + batch::{BlockBatchRecord, BlockExecutorStats}, + db::states::bundle_state::BundleRetention, + optimism::ensure_create2_deployer, + processor::compare_receipts_root_and_logs_bloom, + stack::InspectorStack, + state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, + Evm, State, +}; +use revm_primitives::{ + db::{Database, DatabaseCommit}, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, +}; +use std::sync::Arc; +use tracing::{debug, trace}; + +/// Provides executors to execute regular ethereum blocks +#[derive(Debug, Clone)] +pub struct OpExecutorProvider { + chain_spec: Arc, + evm_config: EvmConfig, + inspector: Option, + prune_modes: PruneModes, +} + +impl OpExecutorProvider { + /// Creates a new default optimism executor provider. + pub fn optimism(chain_spec: Arc) -> Self { + Self::new(chain_spec, Default::default()) + } +} + +impl OpExecutorProvider { + /// Creates a new executor provider. 
+ pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { + Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } + } + + /// Configures an optional inspector stack for debugging. + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; + self + } + + /// Configures the prune modes for the executor. + pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + self.prune_modes = prune_modes; + self + } +} + +impl OpExecutorProvider +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + fn op_executor(&self, db: DB) -> OpBlockExecutor + where + DB: Database, + { + OpBlockExecutor::new( + self.chain_spec.clone(), + self.evm_config.clone(), + State::builder().with_database(db).with_bundle_update().without_state_clear().build(), + ) + .with_inspector(self.inspector.clone()) + } +} + +impl ExecutorProvider for OpExecutorProvider +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + type Executor> = OpBlockExecutor; + + type BatchExecutor> = OpBatchExecutor; + fn executor(&self, db: DB) -> Self::Executor + where + DB: Database, + { + self.op_executor(db) + } + + fn batch_executor(&self, db: DB) -> Self::BatchExecutor + where + DB: Database, + { + let executor = self.op_executor(db); + OpBatchExecutor { + executor, + batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + stats: BlockExecutorStats::default(), + } + } +} + +/// Helper container type for EVM with chain spec. +#[derive(Debug, Clone)] +struct OpEvmExecutor { + /// The chainspec + chain_spec: Arc, + /// How to create an EVM. + evm_config: EvmConfig, +} + +impl OpEvmExecutor +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, +{ + /// Executes the transactions in the block and returns the receipts. + /// + /// This applies the pre-execution changes, and executes the transactions. + /// + /// # Note + /// + /// It does __not__ apply post-execution changes. 
+ fn execute_pre_and_transactions( + &mut self, + block: &BlockWithSenders, + mut evm: Evm<'_, Ext, &mut State>, + ) -> Result<(Vec, u64), BlockExecutionError> + where + DB: Database, + { + // apply pre execution changes + apply_beacon_root_contract_call( + &self.chain_spec, + block.timestamp, + block.number, + block.parent_beacon_block_root, + &mut evm, + )?; + + // execute transactions + let is_regolith = + self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); + + // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism + // blocks will always have at least a single transaction in them (the L1 info transaction), + // so we can safely assume that this will always be triggered upon the transition and that + // the above check for empty blocks will never be hit on OP chains. + ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()).map_err( + |_| { + BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::ForceCreate2DeployerFail, + ) + }, + )?; + + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(block.body.len()); + for (sender, transaction) in block.transactions_with_sender() { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = block.header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas && + (is_regolith || !transaction.is_system_transaction()) + { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()); + } + + // An optimism block should never contain blob transactions. 
+ if matches!(transaction.tx_type(), TxType::Eip4844) { + return Err(BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::BlobTransactionRejected, + )); + } + + // Cache the depositor account prior to the state transition for the deposit nonce. + // + // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces + // were not introduced in Bedrock. In addition, regular transactions don't have deposit + // nonces, so we don't need to touch the DB for those. + let depositor = (is_regolith && transaction.is_deposit()) + .then(|| { + evm.db_mut() + .load_cache_account(*sender) + .map(|acc| acc.account_info().unwrap_or_default()) + }) + .transpose() + .map_err(|_| { + BlockExecutionError::OptimismBlockExecution( + OptimismBlockExecutionError::AccountLoadFailed(*sender), + ) + })?; + + let mut buf = Vec::with_capacity(transaction.length_without_header()); + transaction.encode_enveloped(&mut buf); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, buf.into()); + + // Execute transaction. + let ResultAndState { result, state } = evm.transact().map_err(move |err| { + // Ensure hash is calculated for error log, if not already done + BlockValidationError::EVM { + hash: transaction.recalculate_hash(), + error: err.into(), + } + })?; + + trace!( + target: "evm", + ?transaction, + "Executed transaction" + ); + + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. + receipts.push(Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs(), + deposit_nonce: depositor.map(|account| account.nonce), + // The deposit receipt version was introduced in Canyon to indicate an update to how + // receipt hashes should be computed when set. 
The state transition process ensures + // this is only set for post-Canyon deposit transactions. + deposit_receipt_version: (transaction.is_deposit() && + self.chain_spec + .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) + .then_some(1), + }); + } + drop(evm); + + // Check if gas used matches the value set in header. + if block.gas_used != cumulative_gas_used { + let receipts = Receipts::from_block_receipt(receipts); + return Err(BlockValidationError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: receipts.gas_spent_by_tx()?, + } + .into()); + } + + Ok((receipts, cumulative_gas_used)) + } +} + +/// A basic Ethereum block executor. +/// +/// Expected usage: +/// - Create a new instance of the executor. +/// - Execute the block. +#[derive(Debug)] +pub struct OpBlockExecutor { + /// Chain specific evm config that's used to execute a block. + executor: OpEvmExecutor, + /// The state to use for execution + state: State, + /// Optional inspector stack for debugging + inspector: Option, +} + +impl OpBlockExecutor { + /// Creates a new Ethereum block executor. + pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { + Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, inspector: None } + } + + /// Sets the inspector stack for debugging. + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; + self + } + + #[inline] + fn chain_spec(&self) -> &ChainSpec { + &self.executor.chain_spec + } + + /// Returns mutable reference to the state that wraps the underlying database. + #[allow(unused)] + fn state_mut(&mut self) -> &mut State { + &mut self.state + } +} + +impl OpBlockExecutor +where + EvmConfig: ConfigureEvm, + // TODO(mattsse): get rid of this + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + /// Configures a new evm configuration and block environment for the given block. 
+ /// + /// Caution: this does not initialize the tx environment. + fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + EvmConfig::fill_cfg_and_block_env( + &mut cfg, + &mut block_env, + self.chain_spec(), + header, + total_difficulty, + ); + + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } + + /// Execute a single block and apply the state changes to the internal state. + /// + /// Returns the receipts of the transactions in the block and the total gas used. + /// + /// Returns an error if execution fails or receipt verification fails. + fn execute_and_verify( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), BlockExecutionError> { + // 1. prepare state on new block + self.on_new_block(&block.header); + + // 2. configure the evm and execute + let env = self.evm_env_for_block(&block.header, total_difficulty); + + let (receipts, gas_used) = { + if let Some(inspector) = self.inspector.as_mut() { + let evm = self.executor.evm_config.evm_with_env_and_inspector( + &mut self.state, + env, + inspector, + ); + self.executor.execute_pre_and_transactions(block, evm)? + } else { + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + + self.executor.execute_pre_and_transactions(block, evm)? + } + }; + + // 3. apply post execution changes + self.post_execution(block, total_difficulty)?; + + // Before Byzantium, receipts contained state root that would mean that expensive + // operation as hashing that is required for state root got calculated in every + // transaction This was replaced with is_success flag. 
+ // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 + if self.chain_spec().is_byzantium_active_at_block(block.header.number) { + if let Err(error) = verify_receipt_optimism( + block.header.receipts_root, + block.header.logs_bloom, + receipts.iter(), + self.chain_spec(), + block.timestamp, + ) { + debug!(target: "evm", %error, ?receipts, "receipts verification failed"); + return Err(error); + }; + } + + Ok((receipts, gas_used)) + } + + /// Apply settings before a new block is executed. + pub(crate) fn on_new_block(&mut self, header: &Header) { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); + self.state.set_state_clear_flag(state_clear_flag); + } + + /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO + /// hardfork state change. + pub fn post_execution( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(), BlockExecutionError> { + let balance_increments = post_block_balance_increments( + self.chain_spec(), + block.number, + block.difficulty, + block.beneficiary, + block.timestamp, + total_difficulty, + &block.ommers, + block.withdrawals.as_ref().map(Withdrawals::as_ref), + ); + // increment balances + self.state + .increment_balances(balance_increments) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + + Ok(()) + } +} + +impl Executor for OpBlockExecutor +where + EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; + type Output = EthBlockOutput; + type Error = BlockExecutionError; + + /// Executes the block and commits the state changes. + /// + /// Returns the receipts of the transactions in the block. + /// + /// Returns an error if the block could not be executed or failed verification. + /// + /// State changes are committed to the database. 
+ fn execute(mut self, input: Self::Input<'_>) -> Result { + let EthBlockExecutionInput { block, total_difficulty } = input; + let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; + + // prepare the state for extraction + self.state.merge_transitions(BundleRetention::PlainState); + + Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + } +} + +/// An executor for a batch of blocks. +/// +/// State changes are tracked until the executor is finalized. +#[derive(Debug)] +pub struct OpBatchExecutor { + /// The executor used to execute blocks. + executor: OpBlockExecutor, + /// Keeps track of the batch and record receipts based on the configured prune mode + batch_record: BlockBatchRecord, + stats: BlockExecutorStats, +} + +impl OpBatchExecutor { + /// Returns the receipts of the executed blocks. + pub fn receipts(&self) -> &Receipts { + self.batch_record.receipts() + } + + /// Returns mutable reference to the state that wraps the underlying database. 
+ #[allow(unused)] + fn state_mut(&mut self) -> &mut State { + self.executor.state_mut() + } +} + +impl BatchExecutor for OpBatchExecutor +where + EvmConfig: ConfigureEvm, + // TODO: get rid of this + EvmConfig: ConfigureEvmEnv, + DB: Database, +{ + type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; + type Output = BundleStateWithReceipts; + type Error = BlockExecutionError; + + fn execute_one(&mut self, input: Self::Input<'_>) -> Result { + let EthBlockExecutionInput { block, total_difficulty } = input; + let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; + + // prepare the state according to the prune mode + let retention = self.batch_record.bundle_retention(block.number); + self.executor.state.merge_transitions(retention); + + // store receipts in the set + self.batch_record.save_receipts(receipts)?; + + Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + } + + fn finalize(mut self) -> Self::Output { + // TODO: track stats + self.stats.log_debug(); + + BundleStateWithReceipts::new( + self.executor.state.take_bundle(), + self.batch_record.take_receipts(), + self.batch_record.first_block().unwrap_or_default(), + ) + } +} + +/// Verify the calculated receipts root against the expected receipts root. +pub fn verify_receipt_optimism<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, + chain_spec: &ChainSpec, + timestamp: u64, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = + calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); + + // Create header log bloom. 
+ let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::{ + b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, + Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, + }; + use reth_revm::{database::StateProviderDatabase, L1_BLOCK_CONTRACT}; + use std::{collections::HashMap, str::FromStr}; + + use crate::OptimismEvmConfig; + use reth_revm::test_utils::StateProviderTest; + + fn create_op_state_provider() -> StateProviderTest { + let mut db = StateProviderTest::default(); + + let l1_block_contract_account = + Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; + + let mut l1_block_storage = HashMap::new(); + // base fee + l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); + // l1 fee overhead + l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); + // l1 fee scalar + l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); + // l1 free scalars post ecotone + l1_block_storage.insert( + StorageKey::with_last_byte(3), + StorageValue::from_str( + "0x0000000000000000000000000000000000001db0000d27300000000000000005", + ) + .unwrap(), + ); + + db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); + + db + } + + fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { + OpExecutorProvider { + chain_spec, + evm_config: Default::default(), + inspector: None, + prune_modes: Default::default(), + } + } + + #[test] + fn op_deposit_fields_pre_canyon() { + let header = Header { + timestamp: 1, + number: 1, + gas_limit: 1_000_000, + gas_used: 42_000, + receipts_root: b256!( + "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" + 
), + ..Default::default() + }; + + let mut db = create_op_state_provider(); + + let addr = Address::ZERO; + let account = Account { balance: U256::MAX, ..Account::default() }; + db.insert_account(addr, account, None, HashMap::new()); + + let chain_spec = + Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).regolith_activated().build()); + + let tx = TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: 21_000, + to: TransactionKind::Call(addr), + ..Default::default() + }), + Signature::default(), + ); + + let tx_deposit = TransactionSigned::from_transaction_and_signature( + Transaction::Deposit(reth_primitives::TxDeposit { + from: addr, + to: TransactionKind::Call(addr), + gas_limit: 21_000, + ..Default::default() + }), + Signature::default(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + + // Attempt to execute a block with one deposit and one non-deposit transaction + executor + .execute_one( + ( + &BlockWithSenders { + block: Block { + header, + body: vec![tx, tx_deposit], + ommers: vec![], + withdrawals: None, + }, + senders: vec![addr, addr], + }, + U256::ZERO, + ) + .into(), + ) + .unwrap(); + + let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); + let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + + // deposit_receipt_version is not present in pre canyon transactions + assert!(deposit_receipt.deposit_receipt_version.is_none()); + assert!(tx_receipt.deposit_receipt_version.is_none()); + + // deposit_nonce is present only in deposit transactions + assert!(deposit_receipt.deposit_nonce.is_some()); + assert!(tx_receipt.deposit_nonce.is_none()); + } + + #[test] + fn op_deposit_fields_post_canyon() { + // ensure_create2_deployer will fail if timestamp is set to less then 2 + let header = 
Header { + timestamp: 2, + number: 1, + gas_limit: 1_000_000, + gas_used: 42_000, + receipts_root: b256!( + "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" + ), + ..Default::default() + }; + + let mut db = create_op_state_provider(); + let addr = Address::ZERO; + let account = Account { balance: U256::MAX, ..Account::default() }; + + db.insert_account(addr, account, None, HashMap::new()); + + let chain_spec = + Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).canyon_activated().build()); + + let tx = TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: 21_000, + to: TransactionKind::Call(addr), + ..Default::default() + }), + Signature::default(), + ); + + let tx_deposit = TransactionSigned::from_transaction_and_signature( + Transaction::Deposit(reth_primitives::TxDeposit { + from: addr, + to: TransactionKind::Call(addr), + gas_limit: 21_000, + ..Default::default() + }), + Signature::optimism_deposit_tx_signature(), + ); + + let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + + executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_one( + ( + &BlockWithSenders { + block: Block { + header, + body: vec![tx, tx_deposit], + ommers: vec![], + withdrawals: None, + }, + senders: vec![addr, addr], + }, + U256::ZERO, + ) + .into(), + ) + .expect("Executing a block while canyon is active should not fail"); + + let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); + let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + + // deposit_receipt_version is set to 1 for post canyon deposit transactions + assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); + assert!(tx_receipt.deposit_receipt_version.is_none()); + + // deposit_nonce is present 
only in deposit transactions + assert!(deposit_receipt.deposit_nonce.is_some()); + assert!(tx_receipt.deposit_nonce.is_none()); + } +} diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs new file mode 100644 index 000000000..6a6324302 --- /dev/null +++ b/crates/optimism/evm/src/lib.rs @@ -0,0 +1,107 @@ +//! EVM config for vanilla optimism. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] + +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_primitives::{ + revm::{config::revm_spec, env::fill_op_tx_env}, + revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + Address, Bytes, ChainSpec, Head, Header, Transaction, U256, +}; +use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; + +mod execute; +pub use execute::*; + +/// Optimism-related EVM configuration. 
+#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct OptimismEvmConfig; + +impl ConfigureEvmEnv for OptimismEvmConfig { + type TxMeta = Bytes; + + fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Bytes) + where + T: AsRef, + { + fill_op_tx_env(tx_env, transaction, sender, meta); + } + + fn fill_cfg_env( + cfg_env: &mut CfgEnvWithHandlerCfg, + chain_spec: &ChainSpec, + header: &Header, + total_difficulty: U256, + ) { + let spec_id = revm_spec( + chain_spec, + Head { + number: header.number, + timestamp: header.timestamp, + difficulty: header.difficulty, + total_difficulty, + hash: Default::default(), + }, + ); + + cfg_env.chain_id = chain_spec.chain().id(); + cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; + + cfg_env.handler_cfg.spec_id = spec_id; + cfg_env.handler_cfg.is_optimism = chain_spec.is_optimism(); + } +} + +impl ConfigureEvm for OptimismEvmConfig { + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + EvmBuilder::default().with_db(db).optimism().build() + } + + fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + where + DB: Database + 'a, + I: GetInspector, + { + EvmBuilder::default() + .with_db(db) + .with_external_context(inspector) + .optimism() + .append_handler_register(inspector_handle_register) + .build() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::revm_primitives::{BlockEnv, CfgEnv}; + use reth_revm::primitives::SpecId; + + #[test] + #[ignore] + fn test_fill_cfg_and_block_env() { + let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); + let mut block_env = BlockEnv::default(); + let header = Header::default(); + let chain_spec = ChainSpec::default(); + let total_difficulty = U256::ZERO; + + OptimismEvmConfig::fill_cfg_and_block_env( + &mut cfg_env, + &mut block_env, + &chain_spec, + &header, + total_difficulty, + ); + + assert_eq!(cfg_env.chain_id, chain_spec.chain().id()); 
+ } +} diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index f242adf5a..36bfe96b5 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -28,6 +28,7 @@ reth-network.workspace = true reth-interfaces.workspace = true reth-evm.workspace = true reth-revm.workspace = true +reth-evm-optimism.workspace = true reth-beacon-consensus.workspace = true revm.workspace = true revm-primitives.workspace = true @@ -39,7 +40,7 @@ http.workspace = true http-body.workspace = true reqwest = { version = "0.11", default-features = false, features = [ "rustls-tls", -]} +] } tracing.workspace = true # misc @@ -54,7 +55,7 @@ jsonrpsee.workspace = true [dev-dependencies] reth.workspace = true reth-db.workspace = true -reth-revm = { workspace = true, features = ["test-utils"]} +reth-revm = { workspace = true, features = ["test-utils"] } reth-e2e-test-utils.workspace = true tokio.workspace = true alloy-primitives.workspace = true @@ -66,6 +67,7 @@ optimism = [ "reth-rpc-types-compat/optimism", "reth-rpc/optimism", "reth-revm/optimism", + "reth-evm-optimism/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", ] diff --git a/crates/optimism/node/src/evm/mod.rs b/crates/optimism/node/src/evm/mod.rs index 086253a0d..139597f9c 100644 --- a/crates/optimism/node/src/evm/mod.rs +++ b/crates/optimism/node/src/evm/mod.rs @@ -1,96 +1,2 @@ -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm::{config::revm_spec, env::fill_op_tx_env}, - revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, Bytes, ChainSpec, Head, Header, Transaction, U256, -}; -use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -mod execute; -pub use execute::*; -/// Optimism-related EVM configuration. 
-#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct OptimismEvmConfig; - -impl ConfigureEvmEnv for OptimismEvmConfig { - type TxMeta = Bytes; - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Bytes) - where - T: AsRef, - { - fill_op_tx_env(tx_env, transaction, sender, meta); - } - - fn fill_cfg_env( - cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, - header: &Header, - total_difficulty: U256, - ) { - let spec_id = revm_spec( - chain_spec, - Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - hash: Default::default(), - }, - ); - - cfg_env.chain_id = chain_spec.chain().id(); - cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; - - cfg_env.handler_cfg.spec_id = spec_id; - cfg_env.handler_cfg.is_optimism = chain_spec.is_optimism(); - } -} - -impl ConfigureEvm for OptimismEvmConfig { - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { - EvmBuilder::default().with_db(db).optimism().build() - } - - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> - where - DB: Database + 'a, - I: GetInspector, - { - EvmBuilder::default() - .with_db(db) - .with_external_context(inspector) - .optimism() - .append_handler_register(inspector_handle_register) - .build() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_primitives::revm_primitives::{BlockEnv, CfgEnv}; - use revm::primitives::SpecId; - - #[test] - #[ignore] - fn test_fill_cfg_and_block_env() { - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - let mut block_env = BlockEnv::default(); - let header = Header::default(); - let chain_spec = ChainSpec::default(); - let total_difficulty = U256::ZERO; - - OptimismEvmConfig::fill_cfg_and_block_env( - &mut cfg_env, - &mut block_env, - &chain_spec, - &header, - total_difficulty, - ); - - assert_eq!(cfg_env.chain_id, chain_spec.chain().id()); - } 
-} diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index e75b03890..7fc1c34b6 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -17,11 +17,6 @@ pub mod args; pub mod engine; pub use engine::OptimismEngineTypes; -/// Exports optimism-specific implementations of the -/// [ConfigureEvmEnv](reth_node_api::ConfigureEvmEnv) trait. -pub mod evm; -pub use evm::OptimismEvmConfig; - pub mod node; pub use node::OptimismNode; @@ -32,3 +27,5 @@ pub mod rpc; pub use reth_optimism_payload_builder::{ OptimismBuiltPayload, OptimismPayloadBuilder, OptimismPayloadBuilderAttributes, }; + +pub use reth_evm_optimism::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 0c365ccc7..0d6e4996a 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -3,9 +3,10 @@ use crate::{ args::RollupArgs, txpool::{OpTransactionPool, OpTransactionValidator}, - OptimismEngineTypes, OptimismEvmConfig, + OptimismEngineTypes, }; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_evm_optimism::OptimismEvmConfig; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_builder::{ components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 357309de7..d5bd324ae 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -74,7 +74,7 @@ tracing-futures = "0.2" schnellru.workspace = true futures.workspace = true derive_more.workspace = true -dyn-clone.workspace = true +dyn-clone.workspace = true [dev-dependencies] reth-evm-ethereum.workspace = true From a75d6cd753a6988ff6c1033b24e485d65b4c3b48 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 23 Apr 2024 23:10:58 +0200 Subject: [PATCH 018/250] chore: rm dbg (#7827) --- crates/rpc/rpc-engine-api/src/error.rs | 1 - 1 file changed, 1 deletion(-) diff --git 
a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 8a7790cf0..01b99a54f 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -222,7 +222,6 @@ mod tests { err: impl Into>, ) { let err = err.into(); - dbg!(&err); assert_eq!(err.code(), code); assert_eq!(err.message(), message); } From 0f9145b728d2079d9c0fd0e6cd997f5641e3a675 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 23 Apr 2024 23:17:36 +0200 Subject: [PATCH 019/250] chore: add CanonStateSubscriptions to NoopTree (#7828) --- crates/blockchain-tree/src/noop.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index eff385fb6..9fa820255 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -22,7 +22,10 @@ use std::collections::{BTreeMap, HashSet}; /// Caution: this is only intended for testing purposes, or for wiring components together. #[derive(Debug, Clone, Default)] #[non_exhaustive] -pub struct NoopBlockchainTree {} +pub struct NoopBlockchainTree { + /// Broadcast channel for canon state changes notifications. 
+ pub canon_state_notification_sender: Option, +} impl BlockchainTreeEngine for NoopBlockchainTree { fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { @@ -127,6 +130,9 @@ impl BlockchainTreePendingStateProvider for NoopBlockchainTree { impl CanonStateSubscriptions for NoopBlockchainTree { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - CanonStateNotificationSender::new(1).subscribe() + self.canon_state_notification_sender + .as_ref() + .map(|sender| sender.subscribe()) + .unwrap_or_else(|| CanonStateNotificationSender::new(1).subscribe()) } } From 665e67ec7cfdbaa00dc4d1068c8400d50c6cc2b5 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 23 Apr 2024 23:30:34 +0200 Subject: [PATCH 020/250] ci: use `HOMEBREW` secret (#7829) --- .github/workflows/release-dist.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml index 5989a532c..2142360e0 100644 --- a/.github/workflows/release-dist.yml +++ b/.github/workflows/release-dist.yml @@ -14,7 +14,7 @@ jobs: - name: Update Homebrew formula uses: dawidd6/action-homebrew-bump-formula@v3 with: - token: ${{ secrets.GITHUB_TOKEN }} + token: ${{ secrets.HOMEBREW }} no_fork: true tap: paradigmxyz/brew formula: reth From 7a593882e1735d76772cc8685d4ff597f9bbf114 Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 24 Apr 2024 05:32:59 +0800 Subject: [PATCH 021/250] chore(github): use codespell to inspect and correct spelling issues (#7775) Signed-off-by: jsvisa Co-authored-by: Matthias Seitz --- .codespellrc | 3 +++ .github/workflows/lint.yml | 8 +++++++- Makefile | 12 +++++++++++- bin/reth/src/commands/stage/dump/hashing_account.rs | 2 +- bin/reth/src/commands/stage/dump/hashing_storage.rs | 2 +- bin/reth/src/commands/stage/dump/merkle.rs | 2 +- bin/reth/src/commands/test_vectors/tables.rs | 4 ++-- crates/engine-primitives/src/error.rs | 6 ++++-- 
crates/ethereum/engine-primitives/src/lib.rs | 2 +- crates/net/ecies/src/error.rs | 4 ++-- crates/net/network/src/transactions/validation.rs | 2 +- crates/node-builder/src/components/builder.rs | 2 +- crates/node-core/src/args/rpc_server_args.rs | 2 +- crates/rpc/rpc-api/src/engine.rs | 2 +- crates/rpc/rpc-api/src/ganache.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 4 ++-- crates/rpc/rpc/src/eth/signer.rs | 2 +- crates/rpc/rpc/src/lib.rs | 2 +- crates/static-file/README.md | 2 +- crates/storage/db/benches/hash_keys.rs | 6 +++--- crates/storage/db/src/abstraction/common.rs | 2 +- crates/storage/libmdbx-rs/src/environment.rs | 2 +- crates/storage/nippy-jar/src/lib.rs | 2 +- .../provider/src/providers/state/historical.rs | 2 +- crates/storage/provider/src/traits/transactions.rs | 2 +- crates/transaction-pool/src/pool/pending.rs | 2 +- crates/transaction-pool/src/pool/txpool.rs | 4 ++-- 28 files changed, 55 insertions(+), 34 deletions(-) create mode 100644 .codespellrc diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 000000000..771985af1 --- /dev/null +++ b/.codespellrc @@ -0,0 +1,3 @@ +[codespell] +skip = .git,target,./crates/storage/libmdbx-rs/mdbx-sys/libmdbx,Cargo.toml,Cargo.lock +ignore-words-list = crate,ser,ratatui diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1f1f7a13c..ff3dad495 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -110,6 +110,12 @@ jobs: components: rustfmt - run: cargo fmt --all --check + codespell: + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: codespell-project/actions-codespell@v2 + grafana: runs-on: ubuntu-latest timeout-minutes: 30 @@ -124,7 +130,7 @@ jobs: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, grafana] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] timeout-minutes: 30 steps: - name: Decide 
whether the needed jobs succeeded or failed diff --git a/Makefile b/Makefile index c8adf4ff9..187de174d 100644 --- a/Makefile +++ b/Makefile @@ -303,11 +303,21 @@ lint-other-targets: --all-features \ -- -D warnings +lint-codespell: ensure-codespell + codespell + +ensure-codespell: + @if ! command -v codespell &> /dev/null; then \ + echo "codespell not found. Please install it by running the command `pip install codespell` or refer to the following link for more information: https://github.com/codespell-project/codespell" \ + exit 1; \ + fi + lint: make fmt && \ make lint-reth && \ make lint-op-reth && \ - make lint-other-targets + make lint-other-targets \ + make lint-codespell fix-lint-reth: cargo +nightly clippy \ diff --git a/bin/reth/src/commands/stage/dump/hashing_account.rs b/bin/reth/src/commands/stage/dump/hashing_account.rs index 1888f0e30..35bbfa4d7 100644 --- a/bin/reth/src/commands/stage/dump/hashing_account.rs +++ b/bin/reth/src/commands/stage/dump/hashing_account.rs @@ -69,7 +69,7 @@ fn unwind_and_copy( Ok(()) } -/// Try to re-execute the stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/stage/dump/hashing_storage.rs b/bin/reth/src/commands/stage/dump/hashing_storage.rs index 7f827b25c..a7e387880 100644 --- a/bin/reth/src/commands/stage/dump/hashing_storage.rs +++ b/bin/reth/src/commands/stage/dump/hashing_storage.rs @@ -65,7 +65,7 @@ fn unwind_and_copy( Ok(()) } -/// Try to re-execute the stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs index 08ac0a3aa..2dfd0172b 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/bin/reth/src/commands/stage/dump/merkle.rs @@ -138,7 +138,7 @@ async fn unwind_and_copy( Ok(()) } -/// Try to re-execute the 
stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/test_vectors/tables.rs b/bin/reth/src/commands/test_vectors/tables.rs index 6399c81ac..181ed0e3e 100644 --- a/bin/reth/src/commands/test_vectors/tables.rs +++ b/bin/reth/src/commands/test_vectors/tables.rs @@ -81,7 +81,7 @@ where { let mut rows = vec![]; let mut seen_keys = HashSet::new(); - let strat = proptest::collection::vec( + let strategy = proptest::collection::vec( any_with::>(( ::Parameters::default(), ::Parameters::default(), @@ -94,7 +94,7 @@ where while rows.len() < per_table { // Generate all `per_table` rows: (Key, Value) rows.extend( - &mut strat + &mut strategy .new_tree(runner) .map_err(|e| eyre::eyre!("{e}"))? .current() diff --git a/crates/engine-primitives/src/error.rs b/crates/engine-primitives/src/error.rs index d6549a516..f6dd3a8b7 100644 --- a/crates/engine-primitives/src/error.rs +++ b/crates/engine-primitives/src/error.rs @@ -8,11 +8,13 @@ use thiserror::Error; /// both execution payloads and forkchoice update attributes with respect to a method version. #[derive(Error, Debug)] pub enum EngineObjectValidationError { - /// Thrown when the underlying validation error occured while validating an `ExecutionPayload`. + /// Thrown when the underlying validation error occurred while validating an + /// `ExecutionPayload`. #[error("Payload validation error: {0}")] Payload(VersionSpecificValidationError), - /// Thrown when the underlying validation error occured while validating a `PayloadAttributes`. + /// Thrown when the underlying validation error occurred while validating a + /// `PayloadAttributes`. 
#[error("Payload attributes validation error: {0}")] PayloadAttributes(VersionSpecificValidationError), diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 6b030a9c5..cb6d0231e 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -1,4 +1,4 @@ -//! Ethereum specifc +//! Ethereum specific #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index d87545871..64526f16d 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -81,8 +81,8 @@ pub enum ECIESErrorImpl { /// a message from the (partially filled) buffer. #[error("stream closed due to not being readable")] UnreadableStream, - // Error when data is not recieved from peer for a prolonged period. - #[error("never recieved data from remote peer")] + // Error when data is not received from peer for a prolonged period. + #[error("never received data from remote peer")] StreamTimeout, } diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index e508b2b24..9171004bd 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -21,7 +21,7 @@ pub const SIGNATURE_DECODED_SIZE_BYTES: usize = mem::size_of::(); pub trait ValidateTx68 { /// Validates a [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) /// entry. Returns [`ValidationOutcome`] which signals to the caller whether to fetch the - /// transaction or wether to drop it, and whether the sender of the announcement should be + /// transaction or to drop it, and whether the sender of the announcement should be /// penalized. 
fn should_fetch( &self, diff --git a/crates/node-builder/src/components/builder.rs b/crates/node-builder/src/components/builder.rs index 6abdca96c..14bdf7a4a 100644 --- a/crates/node-builder/src/components/builder.rs +++ b/crates/node-builder/src/components/builder.rs @@ -164,7 +164,7 @@ impl Default for ComponentsBuilder<(), (), (), ()> { /// A type that configures all the customizable components of the node and knows how to build them. /// -/// Implementors of this trait are responsible for building all the components of the node: See +/// Implementers of this trait are responsible for building all the components of the node: See /// [NodeComponents]. /// /// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server_args.rs index da3095815..2ac48e2ba 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server_args.rs @@ -363,7 +363,7 @@ impl RpcServerArgs { impl RethRpcConfig for RpcServerArgs { fn is_ipc_enabled(&self) -> bool { - // By default IPC is enabled therefor it is enabled if the `ipcdisable` is false. + // By default IPC is enabled therefore it is enabled if the `ipcdisable` is false. !self.ipcdisable } diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 9304bbc5b..d320c7460 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -131,7 +131,7 @@ pub trait EngineApi { /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. /// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. 
#[method(name = "getPayloadBodiesByRangeV1")] diff --git a/crates/rpc/rpc-api/src/ganache.rs b/crates/rpc/rpc-api/src/ganache.rs index 0156f074a..338c91498 100644 --- a/crates/rpc/rpc-api/src/ganache.rs +++ b/crates/rpc/rpc-api/src/ganache.rs @@ -34,7 +34,7 @@ pub trait GanacheApi { /// is the snapshot id to revert to. This deletes the given snapshot, as well as any snapshots /// taken after (e.g.: reverting to id 0x1 will delete snapshots with ids 0x1, 0x2, etc.). /// - /// Reutnrs `true` if a snapshot was reverted, otherwise `false`. + /// Returns `true` if a snapshot was reverted, otherwise `false`. #[method(name = "revert")] async fn evm_revert(&self, snapshot_id: U256) -> RpcResult; diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 62f82b8f8..4b9159e2d 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -717,7 +717,7 @@ impl RpcModuleSelection { /// Creates a new [RpcModule] based on the configured reth modules. /// - /// Note: This will always create new instance of the module handlers and is therefor only + /// Note: This will always create new instance of the module handlers and is therefore only /// recommended for launching standalone transports. If multiple transports need to be /// configured it's recommended to use the [RpcModuleBuilder]. #[allow(clippy::too_many_arguments)] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index be9f98832..eb3b1bfc7 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -292,7 +292,7 @@ where /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. 
/// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. pub async fn get_payload_bodies_by_range( @@ -661,7 +661,7 @@ where /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. /// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. /// diff --git a/crates/rpc/rpc/src/eth/signer.rs b/crates/rpc/rpc/src/eth/signer.rs index b744d83ef..578907604 100644 --- a/crates/rpc/rpc/src/eth/signer.rs +++ b/crates/rpc/rpc/src/eth/signer.rs @@ -53,7 +53,7 @@ impl DevSigner { /// Generates a random dev signer which satisfies [EthSigner] trait pub(crate) fn random() -> Box { let mut signers = Self::random_signers(1); - signers.pop().expect("expect to generate at leas one signer") + signers.pop().expect("expect to generate at least one signer") } /// Generates provided number of random dev signers diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index fe5e2a97d..c75fa9b6b 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -12,7 +12,7 @@ //! //! To avoid this, all blocking or CPU intensive handlers must be spawned to a separate task. See //! the [EthApi] handler implementations for examples. The rpc-api traits make no use of the -//! available jsonrpsee `blocking` attribute to give implementors more freedom because the +//! available jsonrpsee `blocking` attribute to give implementers more freedom because the //! `blocking` attribute and async handlers are mutually exclusive. 
However, as mentioned above, a //! lot of handlers make use of async functions, caching for example, but are also using blocking //! disk-io, hence these calls are spawned as futures to a blocking task manually. diff --git a/crates/static-file/README.md b/crates/static-file/README.md index b6eb385dd..3aab25a97 100644 --- a/crates/static-file/README.md +++ b/crates/static-file/README.md @@ -106,7 +106,7 @@ In descending order of abstraction hierarchy: [`StaticFileProducer`](../../crates/static-file/src/static_file_producer.rs#L25): A `reth` [hook](../../crates/consensus/beacon/src/engine/hooks/static_file.rs) service that when triggered, **copies** finalized data from the database to the latest static file. Upon completion, it updates the internal index at `StaticFileProvider` with the new highest block and transaction on each specific segment. -[`StaticFileProvider`](../../crates/storage/provider/src/providers/static_file/manager.rs#L44) A provider similar to `DatabaseProvider`, **managing all existing static_file files** and selecting the optimal one (by range and segment type) to fulfill a request. **A single instance is shared across all components and should be instantiated only once within `ProviderFactory`**. An immutable reference is given everytime `ProviderFactory` creates a new `DatabaseProvider`. +[`StaticFileProvider`](../../crates/storage/provider/src/providers/static_file/manager.rs#L44) A provider similar to `DatabaseProvider`, **managing all existing static_file files** and selecting the optimal one (by range and segment type) to fulfill a request. **A single instance is shared across all components and should be instantiated only once within `ProviderFactory`**. An immutable reference is given every time `ProviderFactory` creates a new `DatabaseProvider`. 
[`StaticFileJarProvider`](../../crates/storage/provider/src/providers/static_file/jar.rs#L42) A provider similar to `DatabaseProvider` that provides access to a **single static file segment data** one a specific block range. diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index 5376bf504..ee21883fe 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -136,7 +136,7 @@ where T::Key: std::hash::Hash + Arbitrary, T::Value: Arbitrary, { - let strat = proptest::collection::vec( + let strategy = proptest::collection::vec( any_with::>(( ::Parameters::default(), ::Parameters::default(), @@ -147,8 +147,8 @@ where .boxed(); let mut runner = TestRunner::new(ProptestConfig::default()); - let mut preload = strat.new_tree(&mut runner).unwrap().current(); - let mut input = strat.new_tree(&mut runner).unwrap().current(); + let mut preload = strategy.new_tree(&mut runner).unwrap().current(); + let mut input = strategy.new_tree(&mut runner).unwrap().current(); let mut unique_keys = HashSet::new(); preload.retain(|(k, _)| unique_keys.insert(k.clone())); diff --git a/crates/storage/db/src/abstraction/common.rs b/crates/storage/db/src/abstraction/common.rs index 9bce16e39..eef412935 100644 --- a/crates/storage/db/src/abstraction/common.rs +++ b/crates/storage/db/src/abstraction/common.rs @@ -23,7 +23,7 @@ mod sealed { use crate::{database::Database, mock::DatabaseMock, DatabaseEnv}; use std::sync::Arc; - /// Sealed trait to limit the implementors of the Database trait. + /// Sealed trait to limit the implementers of the Database trait. 
pub trait Sealed: Sized {} impl Sealed for &DB {} diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 31430fb99..218196c49 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -501,7 +501,7 @@ impl Default for Geometry { /// /// # Arguments /// -/// * `process_id` – A proceess id of the reader process. +/// * `process_id` – A process id of the reader process. /// * `thread_id` – A thread id of the reader thread. /// * `read_txn_id` – An oldest read transaction number on which stalled. /// * `gap` – A lag from the last committed txn. diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 4d311f273..cc4f2b0f5 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -1071,7 +1071,7 @@ mod tests { let num_rows = 2; // (missing_offsets, expected number of rows) - // If a row wasnt fully pruned, then it should clear it up as well + // If a row wasn't fully pruned, then it should clear it up as well let missing_offsets_scenarios = [(1, 1), (2, 1), (3, 0)]; for (missing_offsets, expected_rows) in missing_offsets_scenarios { diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index a2dba78a0..e87be25c9 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -127,7 +127,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { tracing::warn!( target: "provider::historical_sp", target = self.block_number, - "Attempt to calculate state root for an old block might result in OOM, tread carefully" + "Attempt to calculate state root for an old block might result in OOM, treat carefully" ); } diff --git a/crates/storage/provider/src/traits/transactions.rs b/crates/storage/provider/src/traits/transactions.rs index 
9041593b5..3e798bb41 100644 --- a/crates/storage/provider/src/traits/transactions.rs +++ b/crates/storage/provider/src/traits/transactions.rs @@ -15,7 +15,7 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { /// Returns None if the transaction is not found. fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult>; - /// Get transaction by id, computes hash everytime so more expensive. + /// Get transaction by id, computes hash every time so more expensive. fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; /// Get transaction by id without computing the hash. diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 970321323..7e733a659 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -399,7 +399,7 @@ impl PendingPool { unique_senders = self.highest_nonces.len(); non_local_senders -= unique_removed; - // we can re-use the temp array + // we can reuse the temp array removed.clear(); // loop through the highest nonces set, removing transactions until we reach the limit diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index dfc63c921..cdd897448 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1766,8 +1766,8 @@ pub(crate) struct PoolInternalTransaction { pub(crate) transaction: Arc>, /// The `SubPool` that currently contains this transaction. pub(crate) subpool: SubPool, - /// Keeps track of the current state of the transaction and therefor in which subpool it should - /// reside + /// Keeps track of the current state of the transaction and therefore in which subpool it + /// should reside pub(crate) state: TxState, /// The total cost all transactions before this transaction. 
/// From 86980836b8cc7e36ecf47400485f544cd2ffe2eb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 24 Apr 2024 00:37:24 +0200 Subject: [PATCH 022/250] fix: dont panic on invalid l1 blockinfo calldata (#7834) --- crates/revm/src/optimism/mod.rs | 40 ++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/crates/revm/src/optimism/mod.rs b/crates/revm/src/optimism/mod.rs index 470e7a914..0dc6c6877 100644 --- a/crates/revm/src/optimism/mod.rs +++ b/crates/revm/src/optimism/mod.rs @@ -32,13 +32,18 @@ pub fn extract_l1_info(block: &Block) -> Result Result Result { - // The setL1BlockValuesEcotone tx calldata must be exactly 160 bytes long, considering that - // we already removed the first 4 bytes (the function selector). Detailed breakdown: - // 8 bytes for the block sequence number - // + 4 bytes for the blob base fee scalar - // + 4 bytes for the base fee scalar - // + 8 bytes for the block number - // + 8 bytes for the block timestamp - // + 32 bytes for the base fee - // + 32 bytes for the blob base fee - // + 32 bytes for the block hash - // + 32 bytes for the batcher hash if data.len() != 160 { return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( reth_executor::OptimismBlockExecutionError::L1BlockInfoError { From dcad03c9b8c5375bc0aaba2ff3b481c31b3b03e1 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Wed, 24 Apr 2024 03:31:04 -0600 Subject: [PATCH 023/250] refactor(reth-basic-payload-builder): remove unnecessary assignment (#7835) --- crates/payload/basic/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 359a0fb16..7903dfa8d 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -410,7 +410,6 @@ where BuildOutcome::Better { payload, cached_reads } => { this.cached_reads = Some(cached_reads); debug!(target: "payload_builder", value = %payload.fees(), "built better payload"); - let 
payload = payload; this.best_payload = Some(payload); } BuildOutcome::Aborted { fees, cached_reads } => { From 9db17123b42a49fc911abb8fc4e92b26b9749c80 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:32:22 +0200 Subject: [PATCH 024/250] refactor: replace `to_primitive_transaction_kind` by `From` impl (#7831) --- crates/primitives/src/transaction/mod.rs | 9 +++++++++ .../rpc-types-compat/src/transaction/typed.rs | 18 ++++-------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 31cb277f0..c2df13305 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -868,6 +868,15 @@ impl TransactionKind { } } +impl From for TransactionKind { + fn from(kind: reth_rpc_types::TransactionKind) -> Self { + match kind { + reth_rpc_types::TransactionKind::Call(to) => Self::Call(to), + reth_rpc_types::TransactionKind::Create => Self::Create, + } + } +} + impl Compact for TransactionKind { fn to_compact(self, buf: &mut B) -> usize where diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index cc90c626e..6b0ed5294 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -16,7 +16,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind.into(), value: tx.value, input: tx.input, }), @@ -25,7 +25,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind.into(), value: tx.value, input: tx.input, access_list: tx.access_list, @@ -35,7 +35,7 @@ pub fn to_primitive_transaction( 
nonce: tx.nonce, max_fee_per_gas: tx.max_fee_per_gas.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind.into(), value: tx.value, input: tx.input, access_list: tx.access_list, @@ -47,7 +47,7 @@ pub fn to_primitive_transaction( gas_limit: tx.gas_limit.to(), max_fee_per_gas: tx.max_fee_per_gas.to(), max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind.into(), value: tx.value, access_list: tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, @@ -56,13 +56,3 @@ pub fn to_primitive_transaction( }), }) } - -/// Transforms a [reth_rpc_types::TransactionKind] into a [reth_primitives::TransactionKind] -pub fn to_primitive_transaction_kind( - kind: reth_rpc_types::TransactionKind, -) -> reth_primitives::TransactionKind { - match kind { - reth_rpc_types::TransactionKind::Call(to) => reth_primitives::TransactionKind::Call(to), - reth_rpc_types::TransactionKind::Create => reth_primitives::TransactionKind::Create, - } -} From f372db40c5f6c85f3d1b468180a5233d6ce16e57 Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Wed, 24 Apr 2024 11:36:31 +0200 Subject: [PATCH 025/250] feat: blob e2e test (#7823) --- Cargo.lock | 2 + crates/e2e-test-utils/Cargo.toml | 2 +- crates/e2e-test-utils/src/engine_api.rs | 16 ++-- crates/e2e-test-utils/src/lib.rs | 12 ++- crates/e2e-test-utils/src/network.rs | 4 +- crates/e2e-test-utils/src/node.rs | 106 +++++++++++++---------- crates/e2e-test-utils/src/payload.rs | 4 +- crates/e2e-test-utils/src/rpc.rs | 24 +++++ crates/e2e-test-utils/src/transaction.rs | 80 +++++++++++++++++ crates/e2e-test-utils/src/wallet.rs | 55 +++++------- crates/node-ethereum/tests/e2e/blobs.rs | 96 ++++++++++++++++++++ crates/node-ethereum/tests/e2e/eth.rs | 39 +++++++-- crates/node-ethereum/tests/e2e/main.rs | 1 + crates/node-ethereum/tests/e2e/p2p.rs | 19 ++-- crates/optimism/node/tests/e2e/p2p.rs | 25 ++++-- 
crates/optimism/node/tests/e2e/utils.rs | 17 +--- 16 files changed, 373 insertions(+), 129 deletions(-) create mode 100644 crates/e2e-test-utils/src/rpc.rs create mode 100644 crates/e2e-test-utils/src/transaction.rs create mode 100644 crates/node-ethereum/tests/e2e/blobs.rs diff --git a/Cargo.lock b/Cargo.lock index 9c450fe70..d96e0fe3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -155,6 +155,7 @@ dependencies = [ "c-kzg", "serde", "sha2 0.10.8", + "thiserror", ] [[package]] @@ -185,6 +186,7 @@ dependencies = [ "alloy-serde", "arbitrary", "c-kzg", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "once_cell", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 96b4ca2e6..03e0edb91 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -33,5 +33,5 @@ alloy-signer.workspace = true alloy-signer-wallet = { workspace = true, features = ["mnemonic"] } alloy-rpc-types.workspace = true alloy-network.workspace = true -alloy-consensus.workspace = true +alloy-consensus = { workspace = true, features = ["kzg"] } tracing.workspace = true \ No newline at end of file diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index fe05b0b68..9ede69e67 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -13,13 +13,13 @@ use reth_primitives::B256; use std::marker::PhantomData; /// Helper for engine api operations -pub struct EngineApiHelper { +pub struct EngineApiTestContext { pub canonical_stream: CanonStateNotificationStream, pub engine_api_client: HttpClient, pub _marker: PhantomData, } -impl EngineApiHelper { +impl EngineApiTestContext { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, @@ -34,6 +34,7 @@ impl EngineApiHelper { payload: E::BuiltPayload, payload_builder_attributes: E::PayloadBuilderAttributes, expected_status: PayloadStatusEnum, + versioned_hashes: Vec, ) -> eyre::Result 
where E::ExecutionPayloadV3: From + PayloadEnvelopeExt, @@ -45,7 +46,7 @@ impl EngineApiHelper { let submission = EngineApiClient::::new_payload_v3( &self.engine_api_client, envelope_v3.execution_payload(), - vec![], + versioned_hashes, payload_builder_attributes.parent_beacon_block_root().unwrap(), ) .await?; @@ -56,18 +57,17 @@ impl EngineApiHelper { } /// Sends forkchoice update to the engine api - pub async fn update_forkchoice(&self, hash: B256) -> eyre::Result<()> { + pub async fn update_forkchoice(&self, current_head: B256, new_head: B256) -> eyre::Result<()> { EngineApiClient::::fork_choice_updated_v2( &self.engine_api_client, ForkchoiceState { - head_block_hash: hash, - safe_block_hash: hash, - finalized_block_hash: hash, + head_block_hash: new_head, + safe_block_hash: current_head, + finalized_block_hash: current_head, }, None, ) .await?; - Ok(()) } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 3c34f76e5..8e57eebed 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,4 +1,4 @@ -use node::NodeHelper; +use node::NodeTestContext; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, builder::{NodeBuilder, NodeConfig, NodeHandle}, @@ -18,6 +18,9 @@ use wallet::Wallet; /// Wrapper type to create test nodes pub mod node; +/// Helper for transaction operations +pub mod transaction; + /// Helper type to yield accounts from mnemonic pub mod wallet; @@ -29,6 +32,8 @@ mod network; /// Helper for engine api operations mod engine_api; +/// Helper for rpc operations +mod rpc; /// Helper traits mod traits; @@ -75,7 +80,7 @@ where .launch() .await?; - let mut node = NodeHelper::new(node).await?; + let mut node = NodeTestContext::new(node).await?; // Connect each node in a chain. 
if let Some(previous_node) = nodes.last_mut() { @@ -104,4 +109,5 @@ type TmpPool = <>>::PoolBuilde type TmpNodeAdapter = FullNodeTypesAdapter>; /// Type alias for a type of NodeHelper -pub type NodeHelperType = NodeHelper, TmpPool>>; +pub type NodeHelperType = + NodeTestContext, TmpPool>>; diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 341b0d7d0..92e9b316a 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -5,12 +5,12 @@ use reth_tracing::tracing::info; use tokio_stream::wrappers::UnboundedReceiverStream; /// Helper for network operations -pub struct NetworkHelper { +pub struct NetworkTestContext { network_events: UnboundedReceiverStream, network: NetworkHandle, } -impl NetworkHelper { +impl NetworkTestContext { /// Creates a new network helper pub fn new(network: NetworkHandle) -> Self { let network_events = network.event_listener(); diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 18d147fd9..b2ccf899e 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,35 +1,36 @@ use crate::{ - engine_api::EngineApiHelper, network::NetworkHelper, payload::PayloadHelper, - traits::PayloadEnvelopeExt, + engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, + rpc::RpcTestContext, traits::PayloadEnvelopeExt, }; + use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; + use futures_util::Future; use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, builder::FullNode, providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, - rpc::{ - eth::{error::EthResult, EthTransactions}, - types::engine::PayloadStatusEnum, - }, + rpc::types::engine::PayloadStatusEnum, }; +use reth_node_builder::NodeTypes; use reth_primitives::{stage::StageId, BlockHash, BlockNumber, Bytes, B256}; use std::{marker::PhantomData, 
pin::Pin}; use tokio_stream::StreamExt; /// An helper struct to handle node actions -pub struct NodeHelper +pub struct NodeTestContext where Node: FullNodeComponents, { pub inner: FullNode, - pub payload: PayloadHelper, - pub network: NetworkHelper, - pub engine_api: EngineApiHelper, + pub payload: PayloadTestContext, + pub network: NetworkTestContext, + pub engine_api: EngineApiTestContext, + pub rpc: RpcTestContext, } -impl NodeHelper +impl NodeTestContext where Node: FullNodeComponents, { @@ -39,17 +40,18 @@ where Ok(Self { inner: node.clone(), - network: NetworkHelper::new(node.network.clone()), - payload: PayloadHelper::new(builder).await?, - engine_api: EngineApiHelper { + payload: PayloadTestContext::new(builder).await?, + network: NetworkTestContext::new(node.network.clone()), + engine_api: EngineApiTestContext { engine_api_client: node.auth_server_handle().http_client(), canonical_stream: node.provider.canonical_state_stream(), _marker: PhantomData::, }, + rpc: RpcTestContext { inner: node.rpc_registry }, }) } - pub async fn connect(&mut self, node: &mut NodeHelper) { + pub async fn connect(&mut self, node: &mut NodeTestContext) { self.network.add_peer(node.network.record()).await; node.network.add_peer(self.network.record()).await; node.network.expect_session().await; @@ -62,7 +64,7 @@ where pub async fn advance( &mut self, length: u64, - tx_generator: impl Fn() -> Pin>>, + tx_generator: impl Fn(u64) -> Pin>>, attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes + Copy, ) -> eyre::Result< @@ -76,60 +78,74 @@ where From<::BuiltPayload> + PayloadEnvelopeExt, { let mut chain = Vec::with_capacity(length as usize); - for _ in 0..length { - let (payload, _) = - self.advance_block(tx_generator().await, attributes_generator).await?; - chain.push(payload); + for i in 0..length { + let raw_tx = tx_generator(i).await; + let tx_hash = self.rpc.inject_tx(raw_tx).await?; + let (payload, eth_attr) = self.advance_block(vec![], attributes_generator).await?; + 
let block_hash = payload.block().hash(); + let block_number = payload.block().number; + self.assert_new_block(tx_hash, block_hash, block_number).await?; + chain.push((payload, eth_attr)); } Ok(chain) } - /// Advances the node forward one block - pub async fn advance_block( + /// Creates a new payload from given attributes generator + /// expects a payload attribute event and waits until the payload is built. + /// + /// It triggers the resolve payload via engine api and expects the built payload event. + pub async fn new_payload( &mut self, - raw_tx: Bytes, attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, ) -> eyre::Result<( - ( - ::BuiltPayload, - ::PayloadBuilderAttributes, - ), - B256, + <::Engine as EngineTypes>::BuiltPayload, + <::Engine as EngineTypes>::PayloadBuilderAttributes, )> where ::ExecutionPayloadV3: From<::BuiltPayload> + PayloadEnvelopeExt, { - // push tx into pool via RPC server - let tx_hash = self.inject_tx(raw_tx).await?; - // trigger new payload building draining the pool let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); - // first event is the payload attributes self.payload.expect_attr_event(eth_attr.clone()).await?; - // wait for the payload builder to have finished building self.payload.wait_for_built_payload(eth_attr.payload_id()).await; - // trigger resolve payload via engine api self.engine_api.get_payload_v3(eth_attr.payload_id()).await?; - // ensure we're also receiving the built payload as event - let payload = self.payload.expect_built_payload().await?; + Ok((self.payload.expect_built_payload().await?, eth_attr)) + } + + /// Advances the node forward one block + pub async fn advance_block( + &mut self, + versioned_hashes: Vec, + attributes_generator: impl Fn(u64) -> ::PayloadBuilderAttributes, + ) -> eyre::Result<( + ::BuiltPayload, + <::Engine as EngineTypes>::PayloadBuilderAttributes, + )> + where + ::ExecutionPayloadV3: + From<::BuiltPayload> + PayloadEnvelopeExt, + { + let (payload, 
eth_attr) = self.new_payload(attributes_generator).await?; - // submit payload via engine api let block_hash = self .engine_api - .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) + .submit_payload( + payload.clone(), + eth_attr.clone(), + PayloadStatusEnum::Valid, + versioned_hashes, + ) .await?; // trigger forkchoice update via engine api to commit the block to the blockchain - self.engine_api.update_forkchoice(block_hash).await?; + self.engine_api.update_forkchoice(block_hash, block_hash).await?; - // assert the block has been committed to the blockchain - self.assert_new_block(tx_hash, block_hash, payload.block().number).await?; - Ok(((payload, eth_attr), tx_hash)) + Ok((payload, eth_attr)) } /// Waits for block to be available on node. @@ -169,12 +185,6 @@ where Ok(()) } - /// Injects a raw transaction into the node tx pool via RPC server - async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult { - let eth_api = self.inner.rpc_registry.eth_api(); - eth_api.send_raw_transaction(raw_tx).await - } - /// Asserts that a new block has been added to the blockchain /// and the tx has been included in the block pub async fn assert_new_block( diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 2d349721b..47f4134d7 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -4,13 +4,13 @@ use reth_payload_builder::{Events, PayloadBuilderHandle, PayloadId}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations -pub struct PayloadHelper { +pub struct PayloadTestContext { pub payload_event_stream: BroadcastStream>, payload_builder: PayloadBuilderHandle, pub timestamp: u64, } -impl PayloadHelper { +impl PayloadTestContext { /// Creates a new payload helper pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; diff --git a/crates/e2e-test-utils/src/rpc.rs 
b/crates/e2e-test-utils/src/rpc.rs new file mode 100644 index 000000000..09f161a91 --- /dev/null +++ b/crates/e2e-test-utils/src/rpc.rs @@ -0,0 +1,24 @@ +use alloy_consensus::TxEnvelope; +use alloy_network::eip2718::Decodable2718; +use reth::{api::FullNodeComponents, builder::rpc::RpcRegistry, rpc::api::DebugApiServer}; +use reth_primitives::{Bytes, B256}; +use reth_rpc::eth::{error::EthResult, EthTransactions}; + +pub struct RpcTestContext { + pub inner: RpcRegistry, +} + +impl RpcTestContext { + /// Injects a raw transaction into the node tx pool via RPC server + pub async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult { + let eth_api = self.inner.eth_api(); + eth_api.send_raw_transaction(raw_tx).await + } + + /// Retrieves a transaction envelope by its hash + pub async fn envelope_by_hash(&mut self, hash: B256) -> eyre::Result { + let tx = self.inner.debug_api().raw_transaction(hash).await?.unwrap(); + let tx = tx.to_vec(); + Ok(TxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) + } +} diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs new file mode 100644 index 000000000..a2c40052c --- /dev/null +++ b/crates/e2e-test-utils/src/transaction.rs @@ -0,0 +1,80 @@ +use alloy_consensus::{ + BlobTransactionSidecar, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope, +}; +use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; +use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_signer_wallet::LocalWallet; +use eyre::Ok; +use reth_primitives::{hex, Address, Bytes, U256}; + +use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, B256}; + +pub struct TransactionTestContext; + +impl TransactionTestContext { + /// Creates a static transfer and signs it + pub async fn transfer_tx(chain_id: u64, wallet: LocalWallet) -> Bytes { + let tx = tx(chain_id, None, 0); + let signer = EthereumSigner::from(wallet); + tx.build(&signer).await.unwrap().encoded_2718().into() 
+ } + + /// Creates a tx with blob sidecar and sign it + pub async fn tx_with_blobs(chain_id: u64, wallet: LocalWallet) -> eyre::Result { + let mut tx = tx(chain_id, None, 0); + + let mut builder = SidecarBuilder::::new(); + builder.ingest(b"dummy blob"); + let sidecar: BlobTransactionSidecar = builder.build()?; + + tx.set_blob_sidecar(sidecar); + tx.set_max_fee_per_blob_gas(15e9 as u128); + + let signer = EthereumSigner::from(wallet); + let signed = tx.clone().build(&signer).await.unwrap(); + + Ok(signed.encoded_2718().into()) + } + + pub async fn optimism_l1_block_info_tx( + chain_id: u64, + wallet: LocalWallet, + nonce: u64, + ) -> Bytes { + let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); + let tx = tx(chain_id, Some(l1_block_info), nonce); + let signer = EthereumSigner::from(wallet); + tx.build(&signer).await.unwrap().encoded_2718().into() + } + + /// Validates the sidecar of a given tx envelope and returns the versioned hashes + pub fn validate_sidecar(tx: TxEnvelope) -> Vec { + let proof_setting = MAINNET_KZG_TRUSTED_SETUP.clone(); + + match tx { + TxEnvelope::Eip4844(signed) => match signed.tx() { + TxEip4844Variant::TxEip4844WithSidecar(tx) => { + tx.validate_blob(&proof_setting).unwrap(); + tx.sidecar.versioned_hashes().collect() + } + _ => panic!("Expected Eip4844 
transaction with sidecar"), + }, + _ => panic!("Expected Eip4844 transaction"), + } + } +} + +/// Creates a type 2 transaction +fn tx(chain_id: u64, data: Option, nonce: u64) -> TransactionRequest { + TransactionRequest { + nonce: Some(nonce), + value: Some(U256::from(100)), + to: Some(Address::random()), + gas: Some(210000), + max_fee_per_gas: Some(20e9 as u128), + max_priority_fee_per_gas: Some(20e9 as u128), + chain_id: Some(chain_id), + input: TransactionInput { input: None, data }, + ..Default::default() + } +} diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index d064eede9..d94dec2a0 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -1,19 +1,19 @@ -use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; -use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_signer::Signer; use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; -use reth_primitives::{hex, Address, Bytes, U256}; + /// One of the accounts of the genesis allocations. 
pub struct Wallet { - inner: LocalWallet, - pub nonce: u64, + pub inner: LocalWallet, chain_id: u64, + amount: usize, + derivation_path: Option, } impl Wallet { /// Creates a new account from one of the secret/pubkeys of the genesis allocations (test.json) - pub(crate) fn new(phrase: &str) -> Self { - let inner = MnemonicBuilder::::default().phrase(phrase).build().unwrap(); - Self { inner, chain_id: 1, nonce: 0 } + pub fn new(amount: usize) -> Self { + let inner = MnemonicBuilder::::default().phrase(TEST_MNEMONIC).build().unwrap(); + Self { inner, chain_id: 1, amount, derivation_path: None } } /// Sets chain id @@ -22,31 +22,24 @@ impl Wallet { self } - /// Creates a static transfer and signs it - pub async fn transfer_tx(&mut self) -> Bytes { - self.tx(None).await + fn get_derivation_path(&self) -> &str { + self.derivation_path.as_deref().unwrap_or("m/44'/60'/0'/0/") } - pub async fn optimism_l1_block_info_tx(&mut self) -> Bytes { - let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); - self.tx(Some(l1_block_info)).await - } + pub fn gen(&self) -> Vec { + let builder = MnemonicBuilder::::default().phrase(TEST_MNEMONIC); + + // use the derivation path + let derivation_path = self.get_derivation_path(); - /// Creates a transaction with data and signs it - pub async fn tx(&mut self, 
data: Option) -> Bytes { - let tx = TransactionRequest { - nonce: Some(self.nonce), - value: Some(U256::from(100)), - to: Some(Address::random()), - gas_price: Some(20e9 as u128), - gas: Some(210000), - chain_id: Some(self.chain_id), - input: TransactionInput { input: None, data }, - ..Default::default() - }; - self.nonce += 1; - let signer = EthereumSigner::from(self.inner.clone()); - tx.build(&signer).await.unwrap().encoded_2718().into() + let mut wallets = Vec::with_capacity(self.amount); + for idx in 0..self.amount { + let builder = + builder.clone().derivation_path(&format!("{derivation_path}{idx}")).unwrap(); + let wallet = builder.build().unwrap().with_chain_id(Some(self.chain_id)); + wallets.push(wallet) + } + wallets } } @@ -54,6 +47,6 @@ const TEST_MNEMONIC: &str = "test test test test test test test test test test t impl Default for Wallet { fn default() -> Self { - Wallet::new(TEST_MNEMONIC) + Wallet::new(1) } } diff --git a/crates/node-ethereum/tests/e2e/blobs.rs b/crates/node-ethereum/tests/e2e/blobs.rs new file mode 100644 index 000000000..d8fca42d6 --- /dev/null +++ b/crates/node-ethereum/tests/e2e/blobs.rs @@ -0,0 +1,96 @@ +use std::sync::Arc; + +use reth::{ + args::RpcServerArgs, + builder::{NodeBuilder, NodeConfig, NodeHandle}, + rpc::types::engine::PayloadStatusEnum, + tasks::TaskManager, +}; +use reth_e2e_test_utils::{ + node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_ethereum::EthereumNode; +use reth_primitives::{b256, ChainSpecBuilder, Genesis, MAINNET}; +use reth_transaction_pool::TransactionPool; + +use crate::utils::eth_payload_attributes; + +#[tokio::test] +async fn can_handle_blobs() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + 
.genesis(genesis) + .cancun_activated() + .build(), + ); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node).await?; + + let wallets = Wallet::new(2).gen(); + let blob_wallet = wallets.first().unwrap(); + let second_wallet = wallets.last().unwrap(); + + // inject normal tx + let raw_tx = TransactionTestContext::transfer_tx(1, second_wallet.clone()).await; + let tx_hash = node.rpc.inject_tx(raw_tx).await?; + // build payload with normal tx + let (payload, attributes) = node.new_payload(eth_payload_attributes).await?; + + // clean the pool + node.inner.pool.remove_transactions(vec![tx_hash]); + + // build blob tx + let blob_tx = TransactionTestContext::tx_with_blobs(1, blob_wallet.clone()).await?; + + // inject blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(blob_tx).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // validate sidecar + let versioned_hashes = TransactionTestContext::validate_sidecar(envelope); + + // build a payload + let (blob_payload, blob_attr) = node.new_payload(eth_payload_attributes).await?; + + // submit the blob payload + let blob_block_hash = node + .engine_api + .submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid, versioned_hashes.clone()) + .await?; + + let genesis_hash = b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + + let (_, _) = tokio::join!( + // send fcu with blob hash + node.engine_api.update_forkchoice(genesis_hash, blob_block_hash), + // send fcu with normal hash + node.engine_api.update_forkchoice(genesis_hash, payload.block().hash()) + ); + + // submit normal payload + 
node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid, vec![]).await?; + + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + + // expects the blob tx to be back in the pool + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // make sure the sidecar is present + TransactionTestContext::validate_sidecar(envelope); + + Ok(()) +} diff --git a/crates/node-ethereum/tests/e2e/eth.rs b/crates/node-ethereum/tests/e2e/eth.rs index 39ba5e232..4f566e7c8 100644 --- a/crates/node-ethereum/tests/e2e/eth.rs +++ b/crates/node-ethereum/tests/e2e/eth.rs @@ -4,7 +4,9 @@ use reth::{ builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; -use reth_e2e_test_utils::{node::NodeHelper, setup, wallet::Wallet}; +use reth_e2e_test_utils::{ + node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, +}; use reth_node_ethereum::EthereumNode; use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; use std::sync::Arc; @@ -13,7 +15,7 @@ use std::sync::Arc; async fn can_run_eth_node() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, mut wallet) = setup::( + let (mut nodes, _tasks, _wallet) = setup::( 1, Arc::new( ChainSpecBuilder::default() @@ -27,10 +29,20 @@ async fn can_run_eth_node() -> eyre::Result<()> { .await?; let mut node = nodes.pop().unwrap(); - let raw_tx = wallet.transfer_tx().await; + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; + + // make the node advance + let tx_hash = node.rpc.inject_tx(raw_tx).await?; // make the node advance - node.advance_block(raw_tx, eth_payload_attributes).await?; + let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // assert the block has been committed to the blockchain + node.assert_new_block(tx_hash, block_hash, block_number).await?; Ok(()) } @@ 
-62,14 +74,23 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { .node(EthereumNode::default()) .launch() .await?; - let mut node = NodeHelper::new(node).await?; + let mut node = NodeTestContext::new(node).await?; // Configure wallet from test mnemonic and create dummy transfer tx - let mut wallet = Wallet::default(); - let raw_tx = wallet.transfer_tx().await; + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; // make the node advance - node.advance_block(raw_tx, crate::utils::eth_payload_attributes).await?; + let tx_hash = node.rpc.inject_tx(raw_tx).await?; + + // make the node advance + let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // assert the block has been committed to the blockchain + node.assert_new_block(tx_hash, block_hash, block_number).await?; Ok(()) } @@ -99,7 +120,7 @@ async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyr .launch() .await?; - let node = NodeHelper::new(node).await?; + let node = NodeTestContext::new(node).await?; // Ensure that the engine api client is not available let client = node.inner.engine_ipc_client().await; diff --git a/crates/node-ethereum/tests/e2e/main.rs b/crates/node-ethereum/tests/e2e/main.rs index 6a8a01064..1d0d6db8c 100644 --- a/crates/node-ethereum/tests/e2e/main.rs +++ b/crates/node-ethereum/tests/e2e/main.rs @@ -1,3 +1,4 @@ +mod blobs; mod dev; mod eth; mod p2p; diff --git a/crates/node-ethereum/tests/e2e/p2p.rs b/crates/node-ethereum/tests/e2e/p2p.rs index c7ce2a7c1..768d1ac5a 100644 --- a/crates/node-ethereum/tests/e2e/p2p.rs +++ b/crates/node-ethereum/tests/e2e/p2p.rs @@ -1,5 +1,5 @@ use crate::utils::eth_payload_attributes; -use reth_e2e_test_utils::setup; +use reth_e2e_test_utils::{setup, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; use 
reth_primitives::{ChainSpecBuilder, MAINNET}; use std::sync::Arc; @@ -8,7 +8,7 @@ use std::sync::Arc; async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, mut wallet) = setup::( + let (mut nodes, _tasks, wallet) = setup::( 2, Arc::new( ChainSpecBuilder::default() @@ -21,17 +21,24 @@ async fn can_sync() -> eyre::Result<()> { ) .await?; - let raw_tx = wallet.transfer_tx().await; + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; let mut second_node = nodes.pop().unwrap(); let mut first_node = nodes.pop().unwrap(); // Make the first node advance - let ((payload, _), tx_hash) = - first_node.advance_block(raw_tx.clone(), eth_payload_attributes).await?; + let tx_hash = first_node.rpc.inject_tx(raw_tx).await?; + + // make the node advance + let (payload, _) = first_node.advance_block(vec![], eth_payload_attributes).await?; + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // assert the block has been committed to the blockchain + first_node.assert_new_block(tx_hash, block_hash, block_number).await?; // only send forkchoice update to second node - second_node.engine_api.update_forkchoice(block_hash).await?; + second_node.engine_api.update_forkchoice(block_hash, block_hash).await?; // expect second node advanced via p2p gossip second_node.assert_new_block(tx_hash, block_hash, 1).await?; diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index da6af2090..a38fadf67 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,13 +1,15 @@ use crate::utils::{advance_chain, setup}; -use std::sync::Arc; -use tokio::sync::Mutex; +use reth::primitives::BASE_MAINNET; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet}; +use reth_primitives::ChainId; #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let (mut nodes, 
_tasks, wallet) = setup(2).await?; - let wallet = Arc::new(Mutex::new(wallet)); + let chain_id: ChainId = BASE_MAINNET.chain.into(); + + let (mut nodes, _tasks, _wallet) = setup(2).await?; let second_node = nodes.pop().unwrap(); let mut first_node = nodes.pop().unwrap(); @@ -15,13 +17,24 @@ async fn can_sync() -> eyre::Result<()> { let tip: usize = 300; let tip_index: usize = tip - 1; + let wallet = Wallet::default(); + // On first node, create a chain up to block number 300a - let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?; + let canonical_payload_chain = advance_chain(tip, &mut first_node, |nonce: u64| { + let wallet = wallet.inner.clone(); + Box::pin(async move { + TransactionTestContext::optimism_l1_block_info_tx(chain_id, wallet, nonce).await + }) + }) + .await?; let canonical_chain = canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); // On second node, sync up to block number 300a - second_node.engine_api.update_forkchoice(canonical_chain[tip_index]).await?; + second_node + .engine_api + .update_forkchoice(canonical_chain[tip_index], canonical_chain[tip_index]) + .await?; second_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; Ok(()) diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 5322cad9a..e86a7c654 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,10 +1,9 @@ -use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use reth::{primitives::Bytes, rpc::types::engine::PayloadAttributes, tasks::TaskManager}; use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType}; use reth_node_optimism::{OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_primitives::{Address, ChainSpecBuilder, Genesis, B256, BASE_MAINNET}; -use std::sync::Arc; -use tokio::sync::Mutex; +use 
std::{future::Future, pin::Pin, sync::Arc}; /// Optimism Node Helper type pub(crate) type OpNode = NodeHelperType; @@ -28,17 +27,9 @@ pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskMa pub(crate) async fn advance_chain( length: usize, node: &mut OpNode, - wallet: Arc>, + tx_generator: impl Fn(u64) -> Pin>>, ) -> eyre::Result> { - node.advance( - length as u64, - || { - let wallet = wallet.clone(); - Box::pin(async move { wallet.lock().await.optimism_l1_block_info_tx().await }) - }, - optimism_payload_attributes, - ) - .await + node.advance(length as u64, tx_generator, optimism_payload_attributes).await } /// Helper function to create a new eth payload attributes From dc6a02ce783b5ec9b821e76bd4a76d6317bca752 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 24 Apr 2024 10:38:38 +0100 Subject: [PATCH 026/250] docs(book): recommend running with Docker Compose (#7637) --- book/installation/installation.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/book/installation/installation.md b/book/installation/installation.md index 9ecf71cc5..edd8849af 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -8,6 +8,11 @@ There are three core methods to obtain Reth: * [Docker images](./docker.md) * [Building from source.](./source.md) +> **Note** +> +> If you have Docker installed, we recommend using the [Docker Compose](./docker.md#using-docker-compose) configuration +> that will get you Reth, Lighthouse (Consensus Client), Prometheus and Grafana running and syncing with just one command. + ## Hardware Requirements The hardware requirements for running Reth depend on the node configuration and can change over time as the network grows or new features are implemented. 
From 4f81f3acc9fdc56b18444c6754f852b7060c57ee Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 24 Apr 2024 11:53:54 +0200 Subject: [PATCH 027/250] feat(discv5): recycle clean up code (#7727) Co-authored-by: Oliver Nordbjerg --- crates/net/discv5/src/config.rs | 58 ++++---- crates/net/discv5/src/filter.rs | 2 +- crates/net/discv5/src/lib.rs | 210 +++++++++++++++------------ crates/net/discv5/src/metrics.rs | 9 +- crates/net/discv5/src/network_key.rs | 11 ++ crates/net/network/src/config.rs | 15 +- 6 files changed, 173 insertions(+), 132 deletions(-) create mode 100644 crates/net/discv5/src/network_key.rs diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 809f0fa32..bf15be861 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -2,6 +2,7 @@ use std::{ collections::HashSet, + fmt::Debug, net::{IpAddr, SocketAddr}, }; @@ -10,14 +11,7 @@ use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; use reth_primitives::{Bytes, ForkId, NodeRecord, MAINNET}; -use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys}; - -/// L1 EL -pub const ETH: &[u8] = b"eth"; -/// L1 CL -pub const ETH2: &[u8] = b"eth2"; -/// Optimism -pub const OPSTACK: &[u8] = b"opstack"; +use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, network_key}; /// Default interval in seconds at which to run a lookup up query. /// @@ -31,14 +25,18 @@ pub struct ConfigBuilder { discv5_config: Option, /// Nodes to boot from. bootstrap_nodes: HashSet, - /// [`ForkId`] to set in local node record. + /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node + /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", ForkId)`. + /// + /// Defaults to L1 mainnet if not set. fork: Option<(&'static [u8], ForkId)>, /// RLPx TCP port to advertise. 
Note: so long as `reth_network` handles [`NodeRecord`]s as /// opposed to [`Enr`](enr::Enr)s, TCP is limited to same IP address as UDP, since /// [`NodeRecord`] doesn't supply an extra field for and alternative TCP address. tcp_port: u16, - /// Additional kv-pairs that should be advertised to peers by including in local node record. - other_enr_data: Vec<(&'static str, Bytes)>, + /// List of `(key, rlp-encoded-value)` tuples that should be advertised in local node record + /// (in addition to tcp port, udp port and fork). + other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup up query to populate kbuckets. lookup_interval: Option, /// Custom filter rules to apply to a discovered peer in order to determine if it should be @@ -52,9 +50,9 @@ impl ConfigBuilder { let Config { discv5_config, bootstrap_nodes, - fork: fork_id, + fork, tcp_port, - other_enr_data, + other_enr_kv_pairs, lookup_interval, discovered_peer_filter, } = discv5_config; @@ -62,9 +60,9 @@ impl ConfigBuilder { Self { discv5_config: Some(discv5_config), bootstrap_nodes, - fork: Some(fork_id), + fork: Some(fork), tcp_port, - other_enr_data, + other_enr_kv_pairs, lookup_interval: Some(lookup_interval), discovered_peer_filter: Some(discovered_peer_filter), } @@ -117,9 +115,10 @@ impl ConfigBuilder { self } - /// Set [`ForkId`], and key used to identify it, to set in local [`Enr`](discv5::enr::Enr). - pub fn fork(mut self, key: &'static [u8], value: ForkId) -> Self { - self.fork = Some((key, value)); + /// Set fork ID kv-pair to set in local [`Enr`](discv5::enr::Enr). This lets peers on discovery + /// network know which chain this node belongs to. + pub fn fork(mut self, network_key: &'static [u8], fork_id: ForkId) -> Self { + self.fork = Some((network_key, fork_id)); self } @@ -129,9 +128,10 @@ impl ConfigBuilder { self } - /// Adds an additional kv-pair to include in the local [`Enr`](discv5::enr::Enr). 
- pub fn add_enr_kv_pair(mut self, kv_pair: (&'static str, Bytes)) -> Self { - self.other_enr_data.push(kv_pair); + /// Adds an additional kv-pair to include in the local [`Enr`](discv5::enr::Enr). Takes the key + /// to use for the kv-pair and the rlp encoded value. + pub fn add_enr_kv_pair(mut self, key: &'static [u8], value: Bytes) -> Self { + self.other_enr_kv_pairs.push((key, value)); self } @@ -152,7 +152,7 @@ impl ConfigBuilder { bootstrap_nodes, fork, tcp_port, - other_enr_data, + other_enr_kv_pairs, lookup_interval, discovered_peer_filter, } = self; @@ -160,19 +160,19 @@ impl ConfigBuilder { let discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); - let fork = fork.unwrap_or((ETH, MAINNET.latest_fork_id())); + let fork = fork.unwrap_or((network_key::ETH, MAINNET.latest_fork_id())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); let discovered_peer_filter = - discovered_peer_filter.unwrap_or_else(|| MustNotIncludeKeys::new(&[ETH2])); + discovered_peer_filter.unwrap_or_else(|| MustNotIncludeKeys::new(&[network_key::ETH2])); Config { discv5_config, bootstrap_nodes, fork, tcp_port, - other_enr_data, + other_enr_kv_pairs, lookup_interval, discovered_peer_filter, } @@ -187,12 +187,14 @@ pub struct Config { pub(super) discv5_config: discv5::Config, /// Nodes to boot from. pub(super) bootstrap_nodes: HashSet, - /// [`ForkId`] to set in local node record. + /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node + /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", ForkId)`. pub(super) fork: (&'static [u8], ForkId), /// RLPx TCP port to advertise. pub(super) tcp_port: u16, - /// Additional kv-pairs to include in local node record. - pub(super) other_enr_data: Vec<(&'static str, Bytes)>, + /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to + /// peers by including in local node record. 
+ pub(super) other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup up query with to populate kbuckets. pub(super) lookup_interval: u64, /// Custom filter rules to apply to a discovered peer in order to determine if it should be diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index 5cb7be18c..f2f2f2fd6 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -96,7 +96,7 @@ mod tests { use alloy_rlp::Bytes; use discv5::enr::{CombinedKey, Enr}; - use crate::config::{ETH, ETH2}; + use crate::network_key::{ETH, ETH2}; use super::*; diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 218d4299d..7e9fd81b1 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -33,6 +33,7 @@ pub mod enr; pub mod error; pub mod filter; pub mod metrics; +pub mod network_key; pub use discv5::{self, IpMode}; @@ -40,11 +41,13 @@ pub use config::{BootNode, Config, ConfigBuilder}; pub use enr::enr_to_discv4_id; pub use error::Error; pub use filter::{FilterOutcome, MustNotIncludeKeys}; + use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; -/// Default number of times to do pulse lookup queries, at bootstrap (5 second intervals). +/// Default number of times to do pulse lookup queries, at bootstrap (pulse intervals, defaulting +/// to 5 seconds). /// -/// Default is 100 seconds. +/// Default is 100 counts. pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; /// Default duration of look up interval, for pulse look ups at bootstrap. @@ -52,7 +55,7 @@ pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; /// Default is 5 seconds. pub const DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL: u64 = 5; -/// Max kbucket index. +/// Max kbucket index is 255. /// /// This is the max log2distance for 32 byte [`NodeId`](discv5::enr::NodeId) - 1. See . 
pub const MAX_KBUCKET_INDEX: usize = 255; @@ -71,8 +74,8 @@ pub struct Discv5 { discv5: Arc, /// [`IpMode`] of the the node. ip_mode: IpMode, - /// Key used in kv-pair to ID chain. - fork_id_key: &'static [u8], + /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'. + fork_key: &'static [u8], /// Filter applied to a discovered peers before passing it up to app. discovered_peer_filter: MustNotIncludeKeys, /// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers. @@ -165,82 +168,21 @@ impl Discv5 { // // 1. make local enr from listen config // - let Config { - discv5_config, - bootstrap_nodes, - fork, - tcp_port, - other_enr_data, - lookup_interval, - discovered_peer_filter, - } = discv5_config; - - let (enr, bc_enr, ip_mode, fork_id_key) = { - let mut builder = discv5::enr::Enr::builder(); - - let (ip_mode, socket) = match discv5_config.listen_config { - ListenConfig::Ipv4 { ip, port } => { - if ip != Ipv4Addr::UNSPECIFIED { - builder.ip4(ip); - } - builder.udp4(port); - builder.tcp4(tcp_port); - - (IpMode::Ip4, (ip, port).into()) - } - ListenConfig::Ipv6 { ip, port } => { - if ip != Ipv6Addr::UNSPECIFIED { - builder.ip6(ip); - } - builder.udp6(port); - builder.tcp6(tcp_port); - - (IpMode::Ip6, (ip, port).into()) - } - ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { - if ipv4 != Ipv4Addr::UNSPECIFIED { - builder.ip4(ipv4); - } - builder.udp4(ipv4_port); - builder.tcp4(tcp_port); - - if ipv6 != Ipv6Addr::UNSPECIFIED { - builder.ip6(ipv6); - } - builder.udp6(ipv6_port); - - (IpMode::DualStack, (ipv6, ipv6_port).into()) - } - }; - - // add fork id - let (chain, fork_id) = fork; - builder.add_value_rlp(chain, alloy_rlp::encode(fork_id).into()); - - // add other data - for (key, value) in other_enr_data { - builder.add_value_rlp(key, alloy_rlp::encode(value).into()); - } - - // enr v4 not to get confused with discv4, independent versioning enr and - // discovery - let enr = builder.build(sk).expect("should build enr v4"); 
- let EnrCombinedKeyWrapper(enr) = enr.into(); - - trace!(target: "net::discv5", - ?enr, - "local ENR" - ); + let (enr, bc_enr, fork_key, ip_mode) = Self::build_local_enr(sk, &discv5_config); - // backwards compatible enr - let bc_enr = NodeRecord::from_secret_key(socket, sk); - - (enr, bc_enr, ip_mode, chain) - }; + trace!(target: "net::discv5", + ?enr, + "local ENR" + ); // // 2. start discv5 // + let Config { + discv5_config, bootstrap_nodes, lookup_interval, discovered_peer_filter, .. + } = discv5_config; + + let EnrCombinedKeyWrapper(enr) = enr.into(); let sk = discv5::enr::CombinedKey::secp256k1_from_bytes(&mut sk.secret_bytes()).unwrap(); let mut discv5 = match discv5::Discv5::new(enr, sk, discv5_config) { Ok(discv5) => discv5, @@ -261,17 +203,79 @@ impl Discv5 { let metrics = Discv5Metrics::default(); // - // 4. bg kbuckets maintenance + // 4. start bg kbuckets maintenance // Self::spawn_populate_kbuckets_bg(lookup_interval, metrics.clone(), discv5.clone()); Ok(( - Self { discv5, ip_mode, fork_id_key, discovered_peer_filter, metrics }, + Self { discv5, ip_mode, fork_key, discovered_peer_filter, metrics }, discv5_updates, bc_enr, )) } + fn build_local_enr( + sk: &SecretKey, + config: &Config, + ) -> (Enr, NodeRecord, &'static [u8], IpMode) { + let mut builder = discv5::enr::Enr::builder(); + + let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. 
} = config; + + let (ip_mode, socket) = match discv5_config.listen_config { + ListenConfig::Ipv4 { ip, port } => { + if ip != Ipv4Addr::UNSPECIFIED { + builder.ip4(ip); + } + builder.udp4(port); + builder.tcp4(*tcp_port); + + (IpMode::Ip4, (ip, port).into()) + } + ListenConfig::Ipv6 { ip, port } => { + if ip != Ipv6Addr::UNSPECIFIED { + builder.ip6(ip); + } + builder.udp6(port); + builder.tcp6(*tcp_port); + + (IpMode::Ip6, (ip, port).into()) + } + ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { + if ipv4 != Ipv4Addr::UNSPECIFIED { + builder.ip4(ipv4); + } + builder.udp4(ipv4_port); + builder.tcp4(*tcp_port); + + if ipv6 != Ipv6Addr::UNSPECIFIED { + builder.ip6(ipv6); + } + builder.udp6(ipv6_port); + + (IpMode::DualStack, (ipv6, ipv6_port).into()) + } + }; + + // identifies which network node is on + let (network, fork_value) = fork; + builder.add_value_rlp(network, alloy_rlp::encode(fork_value).into()); + + // add other data + for (key, value) in other_enr_kv_pairs { + builder.add_value_rlp(key, value.clone().into()); + } + + // enr v4 not to get confused with discv4, independent versioning enr and + // discovery + let enr = builder.build(sk).expect("should build enr v4"); + + // backwards compatible enr + let bc_enr = NodeRecord::from_secret_key(socket, sk); + + (enr, bc_enr, network, ip_mode) + } + /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. 
async fn bootstrap( bootstrap_nodes: HashSet, @@ -423,20 +427,20 @@ impl Discv5 { return None } }; - let fork_id = match self.filter_discovered_peer(enr) { - FilterOutcome::Ok => self.get_fork_id(enr).ok(), - FilterOutcome::Ignore { reason } => { - trace!(target: "net::discovery::discv5", - ?enr, - reason, - "filtered out discovered peer" - ); + if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) { + trace!(target: "net::discovery::discv5", + ?enr, + reason, + "filtered out discovered peer" + ); - self.metrics.discovered_peers.increment_established_sessions_filtered(1); + self.metrics.discovered_peers.increment_established_sessions_filtered(1); - return None - } - }; + return None + } + + let fork_id = + (self.fork_key == network_key::ETH).then(|| self.get_fork_id(enr).ok()).flatten(); trace!(target: "net::discovery::discv5", ?fork_id, @@ -485,7 +489,7 @@ impl Discv5 { &self, enr: &discv5::enr::Enr, ) -> Result { - let key = self.fork_id_key; + let key = self.fork_key; let mut fork_id_bytes = enr.get_raw_rlp(key).ok_or(Error::ForkMissing(key))?; Ok(ForkId::decode(&mut fork_id_bytes)?) @@ -513,8 +517,8 @@ impl Discv5 { } /// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr). 
- pub fn fork_id_key(&self) -> &[u8] { - self.fork_id_key + pub fn fork_key(&self) -> &[u8] { + self.fork_key } } @@ -603,6 +607,7 @@ pub async fn lookup( mod tests { use super::*; use ::enr::{CombinedKey, EnrKey}; + use reth_primitives::MAINNET; use secp256k1::rand::thread_rng; use tracing::trace; @@ -618,7 +623,7 @@ mod tests { .unwrap(), ), ip_mode: IpMode::Ip4, - fork_id_key: b"noop", + fork_key: b"noop", discovered_peer_filter: MustNotIncludeKeys::default(), metrics: Discv5Metrics::default(), } @@ -818,4 +823,21 @@ mod tests { assert_eq!(local_node_id.log2_distance(&target), Some(bucket_index as u64 + 1)); } } + + #[test] + fn build_enr_from_config() { + const TCP_PORT: u16 = 30303; + let fork_id = MAINNET.latest_fork_id(); + + let config = Config::builder(TCP_PORT).fork(network_key::ETH, fork_id).build(); + + let sk = SecretKey::new(&mut thread_rng()); + let (enr, _, _, _) = Discv5::build_local_enr(&sk, &config); + + let decoded_fork_id = + ForkId::decode(&mut enr.get_raw_rlp(network_key::ETH).unwrap()).unwrap(); + + assert_eq!(fork_id, decoded_fork_id); + assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 + } } diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs index 72ea5fc0e..12b024a2f 100644 --- a/crates/net/discv5/src/metrics.rs +++ b/crates/net/discv5/src/metrics.rs @@ -2,7 +2,7 @@ use metrics::{Counter, Gauge}; use reth_metrics::Metrics; -use crate::config::{ETH, ETH2, OPSTACK}; +use crate::network_key::{ETH, ETH2, OPSTACK}; /// Information tracked by [`Discv5`](crate::Discv5). #[derive(Debug, Default, Clone)] @@ -91,13 +91,14 @@ impl DiscoveredPeersMetrics { #[derive(Metrics, Clone)] #[metrics(scope = "discv5")] pub struct AdvertisedChainMetrics { - /// Frequency of node records with a kv-pair with [`OPSTACK`] as key. + /// Frequency of node records with a kv-pair with [`OPSTACK`](crate::network_key) as + /// key. 
opstack: Counter, - /// Frequency of node records with a kv-pair with [`ETH`] as key. + /// Frequency of node records with a kv-pair with [`ETH`](crate::network_key) as key. eth: Counter, - /// Frequency of node records with a kv-pair with [`ETH2`] as key. + /// Frequency of node records with a kv-pair with [`ETH2`](crate::network_key) as key. eth2: Counter, } diff --git a/crates/net/discv5/src/network_key.rs b/crates/net/discv5/src/network_key.rs new file mode 100644 index 000000000..47576e5b2 --- /dev/null +++ b/crates/net/discv5/src/network_key.rs @@ -0,0 +1,11 @@ +//! Keys of ENR [`ForkId`](reth_primitives::ForkId) kv-pair. Identifies which network a node +//! belongs to. + +/// ENR fork ID kv-pair key, for an Ethereum L1 EL node. +pub const ETH: &[u8] = b"eth"; + +/// ENR fork ID kv-pair key, for an Ethereum L1 CL node. +pub const ETH2: &[u8] = b"eth2"; + +/// ENR fork ID kv-pair key, for an Optimism CL node. +pub const OPSTACK: &[u8] = b"opstack"; diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index f9c9212d9..3e89a1f3a 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -9,11 +9,12 @@ use crate::{ NetworkHandle, NetworkManager, }; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; -use reth_discv5::config::OPSTACK; +use reth_discv5::network_key; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; use reth_primitives::{ - mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET, + mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NamedChain, NodeRecord, + PeerId, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -121,15 +122,19 @@ impl NetworkConfig { ) -> Self { let rlpx_port = self.listener_addr.port(); let chain = self.chain_spec.chain; - let fork_id = self.status.forkid; + let 
fork_id = self.chain_spec.latest_fork_id(); let boot_nodes = self.boot_nodes.clone(); let mut builder = reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); - if chain.is_optimism() { - builder = builder.fork(OPSTACK, fork_id) + if chain.named() == Some(NamedChain::Mainnet) { + builder = builder.fork(network_key::ETH, fork_id) } + // todo: set op EL fork id + /*if chain.is_optimism() { + builder = builder.fork(network_key::, fork_id) + }*/ self.set_discovery_v5(f(builder)) } From 1f84c27c35b63a4cd44558292f992e4995da40d7 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 24 Apr 2024 12:35:21 -0400 Subject: [PATCH 028/250] chore: delete empty files (#7843) --- crates/optimism/node/src/evm/execute.rs | 744 ------------------------ crates/optimism/node/src/evm/mod.rs | 2 - 2 files changed, 746 deletions(-) delete mode 100644 crates/optimism/node/src/evm/execute.rs delete mode 100644 crates/optimism/node/src/evm/mod.rs diff --git a/crates/optimism/node/src/evm/execute.rs b/crates/optimism/node/src/evm/execute.rs deleted file mode 100644 index f51c6cd3b..000000000 --- a/crates/optimism/node/src/evm/execute.rs +++ /dev/null @@ -1,744 +0,0 @@ -//! Optimism block executor. 
- -use crate::OptimismEvmConfig; -use reth_evm::{ - execute::{ - BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, - ExecutorProvider, - }, - ConfigureEvm, ConfigureEvmEnv, -}; -use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError, OptimismBlockExecutionError}, - provider::ProviderError, -}; -use reth_primitives::{ - proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, Bytes, ChainSpec, - GotExpected, Hardfork, Header, PruneModes, Receipt, ReceiptWithBloom, Receipts, TxType, - Withdrawals, B256, U256, -}; -use reth_provider::BundleStateWithReceipts; -use reth_revm::{ - batch::{BlockBatchRecord, BlockExecutorStats}, - db::states::bundle_state::BundleRetention, - optimism::ensure_create2_deployer, - processor::compare_receipts_root_and_logs_bloom, - stack::InspectorStack, - state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, - Evm, State, -}; -use revm_primitives::{ - db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, -}; -use std::sync::Arc; -use tracing::{debug, trace}; - -/// Provides executors to execute regular ethereum blocks -#[derive(Debug, Clone)] -pub struct OpExecutorProvider { - chain_spec: Arc, - evm_config: EvmConfig, - inspector: Option, - prune_modes: PruneModes, -} - -impl OpExecutorProvider { - /// Creates a new default optimism executor provider. - pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec, Default::default()) - } -} - -impl OpExecutorProvider { - /// Creates a new executor provider. - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } - } - - /// Configures an optional inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self - } - - /// Configures the prune modes for the executor. 
- pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; - self - } -} - -impl OpExecutorProvider -where - EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, -{ - fn op_executor(&self, db: DB) -> OpBlockExecutor - where - DB: Database, - { - OpBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder().with_database(db).with_bundle_update().without_state_clear().build(), - ) - .with_inspector(self.inspector.clone()) - } -} - -impl ExecutorProvider for OpExecutorProvider -where - EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, -{ - type Executor> = OpBlockExecutor; - - type BatchExecutor> = OpBatchExecutor; - fn executor(&self, db: DB) -> Self::Executor - where - DB: Database, - { - self.op_executor(db) - } - - fn batch_executor(&self, db: DB) -> Self::BatchExecutor - where - DB: Database, - { - let executor = self.op_executor(db); - OpBatchExecutor { - executor, - batch_record: BlockBatchRecord::new(self.prune_modes.clone()), - stats: BlockExecutorStats::default(), - } - } -} - -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -struct OpEvmExecutor { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, -} - -impl OpEvmExecutor -where - EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, -{ - /// Executes the transactions in the block and returns the receipts. - /// - /// This applies the pre-execution changes, and executes the transactions. - /// - /// # Note - /// - /// It does __not__ apply post-execution changes. 
- fn execute_pre_and_transactions( - &mut self, - block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - ) -> Result<(Vec, u64), BlockExecutionError> - where - DB: Database, - { - // apply pre execution changes - apply_beacon_root_contract_call( - &self.chain_spec, - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut evm, - )?; - - // execute transactions - let is_regolith = - self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()).map_err( - |_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::ForceCreate2DeployerFail, - ) - }, - )?; - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.len()); - for (sender, transaction) in block.transactions_with_sender() { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_system_transaction()) - { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - // An optimism block should never contain blob transactions. 
- if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::BlobTransactionRejected, - )) - } - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. - let depositor = (is_regolith && transaction.is_deposit()) - .then(|| { - evm.db_mut() - .load_cache_account(*sender) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::AccountLoadFailed(*sender), - ) - })?; - - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, buf.into()); - - // Execute transaction. - let ResultAndState { result, state } = evm.transact().map_err(move |err| { - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: err.into(), - } - })?; - - trace!( - target: "evm", - ?transaction, - "Executed transaction" - ); - - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs(), - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. 
The state transition process ensures - // this is only set for post-Canyon deposit transactions. - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec - .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) - .then_some(1), - }); - } - drop(evm); - - // Check if gas used matches the value set in header. - if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()) - } - - Ok((receipts, cumulative_gas_used)) - } -} - -/// A basic Ethereum block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct OpBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: OpEvmExecutor, - /// The state to use for execution - state: State, - /// Optional inspector stack for debugging - inspector: Option, -} - -impl OpBlockExecutor { - /// Creates a new Ethereum block executor. - pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, inspector: None } - } - - /// Sets the inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self - } - - #[inline] - fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] - fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl OpBlockExecutor -where - EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, - DB: Database, -{ - /// Configures a new evm configuration and block environment for the given block. 
- /// - /// Caution: this does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - EvmConfig::fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - self.chain_spec(), - header, - total_difficulty, - ); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } - - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block and the total gas used. - /// - /// Returns an error if execution fails or receipt verification fails. - fn execute_and_verify( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute - let env = self.evm_env_for_block(&block.header, total_difficulty); - - let (receipts, gas_used) = { - if let Some(inspector) = self.inspector.as_mut() { - let evm = self.executor.evm_config.evm_with_env_and_inspector( - &mut self.state, - env, - inspector, - ); - self.executor.execute_pre_and_transactions(block, evm)? - } else { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - - self.executor.execute_pre_and_transactions(block, evm)? - } - }; - - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; - - // Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is required for state root got calculated in every - // transaction This was replaced with is_success flag. 
- // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipt_optimism( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - self.chain_spec(), - block.timestamp, - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - } - - Ok((receipts, gas_used)) - } - - /// Apply settings before a new block is executed. - pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - let balance_increments = post_block_balance_increments( - self.chain_spec(), - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); - // increment balances - self.state - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(()) - } -} - -impl Executor for OpBlockExecutor -where - EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, - DB: Database, -{ - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = EthBlockOutput; - type Error = BlockExecutionError; - - /// Executes the block and commits the state changes. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - /// - /// State changes are committed to the database. 
- fn execute(mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; - - // prepare the state for extraction - self.state.merge_transitions(BundleRetention::PlainState); - - Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) - } -} - -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct OpBatchExecutor { - /// The executor used to execute blocks. - executor: OpBlockExecutor, - /// Keeps track of the batch and record receipts based on the configured prune mode - batch_record: BlockBatchRecord, - stats: BlockExecutorStats, -} - -impl OpBatchExecutor { - /// Returns the receipts of the executed blocks. - pub fn receipts(&self) -> &Receipts { - self.batch_record.receipts() - } - - /// Returns mutable reference to the state that wraps the underlying database. 
- #[allow(unused)] - fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() - } -} - -impl BatchExecutor for OpBatchExecutor -where - EvmConfig: ConfigureEvm, - // TODO: get rid of this - EvmConfig: ConfigureEvmEnv, - DB: Database, -{ - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = BundleStateWithReceipts; - type Error = BlockExecutionError; - - fn execute_one(&mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; - let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; - - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) - } - - fn finalize(mut self) -> Self::Output { - // TODO: track stats - self.stats.log_debug(); - - BundleStateWithReceipts::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - ) - } -} - -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipt_optimism<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_primitives::{ - b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, - Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, - }; - use reth_revm::database::StateProviderDatabase; - use revm::L1_BLOCK_CONTRACT; - use std::{collections::HashMap, str::FromStr}; - - use reth_revm::test_utils::StateProviderTest; - - fn create_op_state_provider() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let l1_block_contract_account = - Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; - - let mut l1_block_storage = HashMap::new(); - // base fee - l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); - // l1 fee overhead - l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); - // l1 fee scalar - l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); - // l1 free scalars post ecotone - l1_block_storage.insert( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .unwrap(), - ); - - db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); - - db - } - - fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { - chain_spec, - evm_config: Default::default(), - inspector: None, - prune_modes: Default::default(), - } - } - - #[test] - fn op_deposit_fields_pre_canyon() { - let header = Header { - timestamp: 1, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" - ), - 
..Default::default() - }; - - let mut db = create_op_state_provider(); - - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).regolith_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: TransactionKind::Call(addr), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: TransactionKind::Call(addr), - gas_limit: 21_000, - ..Default::default() - }), - Signature::default(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - - // Attempt to execute a block with one deposit and one non-deposit transaction - executor - .execute_one( - ( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is not present in pre canyon transactions - assert!(deposit_receipt.deposit_receipt_version.is_none()); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } - - #[test] - fn op_deposit_fields_post_canyon() { - // ensure_create2_deployer will fail if timestamp is set to less then 2 - let header = Header 
{ - timestamp: 2, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).canyon_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: TransactionKind::Call(addr), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: TransactionKind::Call(addr), - gas_limit: 21_000, - ..Default::default() - }), - Signature::optimism_deposit_tx_signature(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_one( - ( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .expect("Executing a block while canyon is active should not fail"); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is set to 1 for post canyon deposit transactions - assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only 
in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } -} diff --git a/crates/optimism/node/src/evm/mod.rs b/crates/optimism/node/src/evm/mod.rs deleted file mode 100644 index 139597f9c..000000000 --- a/crates/optimism/node/src/evm/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ - - From 784d8dc597fa8eebf1ef45b7dcfa9374ea94e669 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Wed, 24 Apr 2024 14:19:33 -0400 Subject: [PATCH 029/250] refactor: replace OP error variant with general purpose error (#7844) Co-authored-by: Oliver Nordbjerg --- crates/optimism/node/src/rpc.rs | 3 ++- crates/rpc/rpc-engine-api/src/error.rs | 26 ++++++++++++-------------- crates/rpc/rpc-types/src/eth/error.rs | 9 +++++++++ crates/rpc/rpc-types/src/eth/mod.rs | 1 + crates/rpc/rpc-types/src/lib.rs | 1 + crates/rpc/rpc-types/src/mev.rs | 2 +- crates/rpc/rpc/src/eth/error.rs | 10 +++------- crates/rpc/rpc/src/eth/optimism.rs | 6 ++---- 8 files changed, 31 insertions(+), 27 deletions(-) create mode 100644 crates/rpc/rpc-types/src/eth/error.rs diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 66eb82450..25a399e18 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -3,9 +3,10 @@ use jsonrpsee::types::ErrorObject; use reqwest::Client; use reth_rpc::eth::{ - error::{EthApiError, EthResult, ToRpcError}, + error::{EthApiError, EthResult}, traits::RawTransactionForwarder, }; +use reth_rpc_types::ToRpcError; use std::sync::{atomic::AtomicUsize, Arc}; /// Error type when interacting with the Sequencer diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 01b99a54f..57318d0d6 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -5,6 +5,7 @@ use reth_beacon_consensus::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError use 
reth_engine_primitives::EngineObjectValidationError; use reth_payload_builder::error::PayloadBuilderError; use reth_primitives::{B256, U256}; +use reth_rpc_types::ToRpcError; use thiserror::Error; /// The Engine API result type @@ -86,11 +87,16 @@ pub enum EngineApiError { /// The payload or attributes are known to be malformed before processing. #[error(transparent)] EngineObjectValidationError(#[from] EngineObjectValidationError), - /// If the optimism feature flag is enabled, the payload attributes must have a present - /// gas limit for the forkchoice updated method. - #[cfg(feature = "optimism")] - #[error("Missing gas limit in payload attributes")] - MissingGasLimitInPayloadAttributes, + /// Any other error + #[error("{0}")] + Other(Box), +} + +impl EngineApiError { + /// Crates a new [EngineApiError::Other] variant. + pub fn other(err: E) -> Self { + Self::Other(Box::new(err)) + } } /// Helper type to represent the `error` field in the error response: @@ -188,15 +194,6 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { ) } }, - // Optimism errors - #[cfg(feature = "optimism")] - EngineApiError::MissingGasLimitInPayloadAttributes => { - jsonrpsee_types::error::ErrorObject::owned( - INVALID_PARAMS_CODE, - INVALID_PARAMS_MSG, - Some(ErrorData::new(error)), - ) - } // Any other server error EngineApiError::TerminalTD { .. } | EngineApiError::TerminalBlockHash { .. } | @@ -206,6 +203,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { SERVER_ERROR_MSG, Some(ErrorData::new(error)), ), + EngineApiError::Other(err) => err.to_rpc_error(), } } } diff --git a/crates/rpc/rpc-types/src/eth/error.rs b/crates/rpc/rpc-types/src/eth/error.rs new file mode 100644 index 000000000..e8d55b087 --- /dev/null +++ b/crates/rpc/rpc-types/src/eth/error.rs @@ -0,0 +1,9 @@ +//! Implementation specific Errors for the `eth_` namespace. + +use jsonrpsee_types::ErrorObject; + +/// A tait to convert an error to an RPC error. 
+pub trait ToRpcError: std::error::Error + Send + Sync + 'static { + /// Converts the error to a JSON-RPC error object. + fn to_rpc_error(&self) -> ErrorObject<'static>; +} diff --git a/crates/rpc/rpc-types/src/eth/mod.rs b/crates/rpc/rpc-types/src/eth/mod.rs index dd36e7fd5..6313dbeed 100644 --- a/crates/rpc/rpc-types/src/eth/mod.rs +++ b/crates/rpc/rpc-types/src/eth/mod.rs @@ -1,5 +1,6 @@ //! Ethereum related types +pub(crate) mod error; pub mod transaction; // re-export diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 964144ed6..68ad11c6e 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -37,6 +37,7 @@ pub use eth::{ engine::{ ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, }, + error::ToRpcError, transaction::{self, TransactionKind, TransactionRequest, TypedTransactionRequest}, }; diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs index 2137e1ecf..ae94375db 100644 --- a/crates/rpc/rpc-types/src/mev.rs +++ b/crates/rpc/rpc-types/src/mev.rs @@ -706,7 +706,7 @@ mod u256_numeric_string { match val { serde_json::Value::String(s) => { if let Ok(val) = s.parse::() { - return Ok(U256::from(val)) + return Ok(U256::from(val)); } U256::from_str(&s).map_err(de::Error::custom) } diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index d8add6397..75fbcc220 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -6,7 +6,9 @@ use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; use reth_interfaces::RethError; use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes, U256}; use reth_revm::tracing::{js::JsInspectorError, MuxError}; -use reth_rpc_types::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; +use reth_rpc_types::{ + error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, +}; use 
reth_transaction_pool::error::{ Eip4844PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, PoolTransactionError, @@ -17,12 +19,6 @@ use std::time::Duration; /// Result alias pub type EthResult = Result; -/// A tait for custom rpc errors used by [EthApiError::Other]. -pub trait ToRpcError: std::error::Error + Send + Sync + 'static { - /// Converts the error to a JSON-RPC error object. - fn to_rpc_error(&self) -> ErrorObject<'static>; -} - /// Errors that can occur when interacting with the `eth_` namespace #[derive(Debug, thiserror::Error)] pub enum EthApiError { diff --git a/crates/rpc/rpc/src/eth/optimism.rs b/crates/rpc/rpc/src/eth/optimism.rs index 2871058f8..24f6f36ff 100644 --- a/crates/rpc/rpc/src/eth/optimism.rs +++ b/crates/rpc/rpc/src/eth/optimism.rs @@ -1,11 +1,9 @@ //! Optimism specific types. use jsonrpsee::types::ErrorObject; +use reth_rpc_types::ToRpcError; -use crate::{ - eth::error::{EthApiError, ToRpcError}, - result::internal_rpc_err, -}; +use crate::{eth::error::EthApiError, result::internal_rpc_err}; /// Eth Optimism Api Error #[cfg(feature = "optimism")] From 90da3205de4e7353848bd21cfe7832a0b56d3a50 Mon Sep 17 00:00:00 2001 From: greged93 <82421016+greged93@users.noreply.github.com> Date: Wed, 24 Apr 2024 20:29:19 +0200 Subject: [PATCH 030/250] refactor: remove unused map_err (#7837) Co-authored-by: Oliver Nordbjerg --- crates/rpc/rpc/src/debug.rs | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 500f786d3..b212d1636 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -101,24 +101,16 @@ where env: Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx), handler_cfg: cfg.handler_cfg, }; - let (result, state_changes) = this - .trace_transaction( - opts.clone(), - env, - &mut db, - Some(TransactionContext { - block_hash, - tx_hash: Some(tx_hash), - tx_index: Some(index), - }), - ) - 
.map_err(|err| { - results.push(TraceResult::Error { - error: err.to_string(), - tx_hash: Some(tx_hash), - }); - err - })?; + let (result, state_changes) = this.trace_transaction( + opts.clone(), + env, + &mut db, + Some(TransactionContext { + block_hash, + tx_hash: Some(tx_hash), + tx_index: Some(index), + }), + )?; results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) }); if transactions.peek().is_some() { From 66c6cbc573505d7d6dd55cc54068e89e8fc97898 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 24 Apr 2024 19:34:11 +0100 Subject: [PATCH 031/250] feat(exex): do not log ID on ExEx start (#7846) --- crates/node-builder/src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs index 327d906b3..28e447775 100644 --- a/crates/node-builder/src/builder.rs +++ b/crates/node-builder/src/builder.rs @@ -630,7 +630,7 @@ where // spawn it as a crit task executor.spawn_critical("exex", async move { - info!(target: "reth::cli", id, "ExEx started"); + info!(target: "reth::cli", "ExEx started"); match exex.await { Ok(_) => panic!("ExEx {id} finished. 
ExEx's should run indefinitely"), Err(err) => panic!("ExEx {id} crashed: {err}"), From ddc5ed326390ba30fd3382bff7df0da77d99da20 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Wed, 24 Apr 2024 20:36:19 +0200 Subject: [PATCH 032/250] fix(rpc): correct `Other` error msg (#7845) --- crates/rpc/rpc/src/eth/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 75fbcc220..203b5bbd7 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -115,7 +115,7 @@ pub enum EthApiError { #[error(transparent)] MuxTracerError(#[from] MuxError), /// Any other error - #[error("0")] + #[error("{0}")] Other(Box), } From 659059c67fc8c3bd562b0815d5e12af4c29adad0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 24 Apr 2024 22:18:07 +0200 Subject: [PATCH 033/250] feat: split nodebuilder generics into separate states (#7847) --- bin/reth/src/cli/mod.rs | 4 +- bin/reth/src/commands/node/mod.rs | 4 +- crates/e2e-test-utils/src/lib.rs | 8 +- crates/node-builder/src/builder.rs | 1425 ----------------- crates/node-builder/src/builder/mod.rs | 619 +++++++ crates/node-builder/src/builder/states.rs | 237 +++ crates/node-builder/src/components/builder.rs | 30 +- crates/node-builder/src/components/mod.rs | 56 +- crates/node-builder/src/launch.rs | 558 +++++++ crates/node-builder/src/lib.rs | 3 + crates/node-core/src/node_config.rs | 2 +- crates/node/api/src/node.rs | 84 - 12 files changed, 1492 insertions(+), 1538 deletions(-) delete mode 100644 crates/node-builder/src/builder.rs create mode 100644 crates/node-builder/src/builder/mod.rs create mode 100644 crates/node-builder/src/builder/states.rs create mode 100644 crates/node-builder/src/launch.rs diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index d511d7182..34fd09456 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -14,7 +14,7 @@ use crate::{ use clap::{value_parser, Parser, 
Subcommand}; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; -use reth_node_builder::{InitState, WithLaunchContext}; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_primitives::ChainSpec; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; @@ -130,7 +130,7 @@ impl Cli { /// ```` pub fn run(mut self, launcher: L) -> eyre::Result<()> where - L: FnOnce(WithLaunchContext, InitState>, Ext) -> Fut, + L: FnOnce(WithLaunchContext>>, Ext) -> Fut, Fut: Future>, { // add network name to logs dir diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index 349130486..5f95c534d 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -11,7 +11,7 @@ use crate::{ use clap::{value_parser, Args, Parser}; use reth_cli_runner::CliContext; use reth_db::{init_db, DatabaseEnv}; -use reth_node_builder::{InitState, NodeBuilder, WithLaunchContext}; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{node_config::NodeConfig, version}; use reth_primitives::ChainSpec; use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc}; @@ -136,7 +136,7 @@ impl NodeCommand { /// closure. 
pub async fn execute(self, ctx: CliContext, launcher: L) -> eyre::Result<()> where - L: FnOnce(WithLaunchContext, InitState>, Ext) -> Fut, + L: FnOnce(WithLaunchContext>>, Ext) -> Fut, Fut: Future>, { tracing::info!(target: "reth::cli", version = ?version::SHORT_VERSION, "Starting reth"); diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 8e57eebed..8fdaa044b 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -6,8 +6,8 @@ use reth::{ }; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ - components::{NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - FullNodeComponentsAdapter, FullNodeTypesAdapter, + components::{Components, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + FullNodeTypesAdapter, NodeAdapter, }; use reth_primitives::ChainSpec; use reth_provider::providers::BlockchainProvider; @@ -59,7 +59,7 @@ where }; // Create nodes and peer them - let mut nodes: Vec> = Vec::with_capacity(num_nodes); + let mut nodes: Vec> = Vec::with_capacity(num_nodes); for idx in 0..num_nodes { let mut node_config = NodeConfig::test() @@ -110,4 +110,4 @@ type TmpNodeAdapter = FullNodeTypesAdapter = - NodeTestContext, TmpPool>>; + NodeTestContext, Components, TmpPool>>>; diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs deleted file mode 100644 index 28e447775..000000000 --- a/crates/node-builder/src/builder.rs +++ /dev/null @@ -1,1425 +0,0 @@ -//! Customizable node builder. 
- -#![allow(clippy::type_complexity, missing_debug_implementations)] - -use crate::{ - components::{ComponentsBuilder, NodeComponents, NodeComponentsBuilder, PoolBuilder}, - exex::BoxedLaunchExEx, - hooks::NodeHooks, - node::FullNode, - rpc::{RethRpcServerHandles, RpcContext, RpcHooks}, - Node, NodeHandle, -}; -use eyre::Context; -use futures::{future, future::Either, stream, stream_select, Future, StreamExt}; -use rayon::ThreadPoolBuilder; -use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; -use reth_beacon_consensus::{ - hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensus, BeaconConsensusEngine, -}; -use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, -}; -use reth_config::config::EtlConfig; -use reth_consensus::Consensus; -use reth_db::{ - database::Database, - database_metrics::{DatabaseMetadata, DatabaseMetrics}, - test_utils::{create_test_rw_db, TempDatabase}, - DatabaseEnv, -}; -use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; -use reth_interfaces::p2p::either::EitherDownloader; -use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; -use reth_node_api::{ - FullNodeComponents, FullNodeComponentsAdapter, FullNodeTypes, FullNodeTypesAdapter, NodeTypes, -}; -use reth_node_core::{ - cli::config::{PayloadBuilderConfig, RethRpcConfig, RethTransactionPoolConfig}, - dirs::{ChainPath, DataDirPath, MaybePlatformPath}, - engine_api_store::EngineApiStore, - engine_skip_fcu::EngineApiSkipFcu, - exit::NodeExitFuture, - init::init_genesis, - node_config::NodeConfig, - primitives::{kzg::KzgSettings, Head}, - utils::write_peers_to_file, -}; -use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, format_ether, ChainSpec}; -use reth_provider::{ - providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, -}; -use 
reth_prune::PrunerBuilder; -use reth_revm::EvmProcessorFactory; -use reth_rpc_engine_api::EngineApi; -use reth_static_file::StaticFileProducer; -use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{debug, error, info}; -use reth_transaction_pool::{PoolConfig, TransactionPool}; -use std::{cmp::max, str::FromStr, sync::Arc, thread::available_parallelism}; -use tokio::sync::{mpsc::unbounded_channel, oneshot}; - -/// The builtin provider type of the reth node. -// Note: we need to hardcode this because custom components might depend on it in associated types. -type RethFullProviderType = BlockchainProvider; - -type RethFullAdapter = FullNodeTypesAdapter>; - -#[cfg_attr(doc, aquamarine::aquamarine)] -/// Declaratively construct a node. -/// -/// [`NodeBuilder`] provides a [builder-like interface][builder] for composing -/// components of a node. -/// -/// ## Order -/// -/// Configuring a node starts out with a [`NodeConfig`] (this can be obtained from cli arguments for -/// example) and then proceeds to configure the core static types of the node: [NodeTypes], these -/// include the node's primitive types and the node's engine types. -/// -/// Next all stateful components of the node are configured, these include the -/// [ConfigureEvm](reth_node_api::evm::ConfigureEvm), the database [Database] and all the -/// components of the node that are downstream of those types, these include: -/// -/// - The transaction pool: [PoolBuilder] -/// - The network: [NetworkBuilder](crate::components::NetworkBuilder) -/// - The payload builder: [PayloadBuilder](crate::components::PayloadServiceBuilder) -/// -/// Once all the components are configured, the node is ready to be launched. -/// -/// On launch the builder returns a fully type aware [NodeHandle] that has access to all the -/// configured components and can interact with the node. 
-/// -/// There are convenience functions for networks that come with a preset of types and components via -/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_node_optimism::OptimismNode`. -/// -/// The [NodeBuilder::node] function configures the node's types and components in one step. -/// -/// ## Components -/// -/// All components are configured with a [NodeComponentsBuilder] that is responsible for actually -/// creating the node components during the launch process. The [ComponentsBuilder] is a general -/// purpose implementation of the [NodeComponentsBuilder] trait that can be used to configure the -/// network, transaction pool and payload builder of the node. It enforces the correct order of -/// configuration, for example the network and the payload builder depend on the transaction pool -/// type that is configured first. -/// -/// All builder traits are generic over the node types and are invoked with the [BuilderContext] -/// that gives access to internals of the that are needed to configure the components. This include -/// the original config, chain spec, the database provider and the task executor, -/// -/// ## Hooks -/// -/// Once all the components are configured, the builder can be used to set hooks that are run at -/// specific points in the node's lifecycle. This way custom services can be spawned before the node -/// is launched [NodeBuilder::on_component_initialized], or once the rpc server(s) are launched -/// [NodeBuilder::on_rpc_started]. The [NodeBuilder::extend_rpc_modules] can be used to inject -/// custom rpc modules into the rpc server before it is launched. See also [RpcContext] -/// All hooks accept a closure that is then invoked at the appropriate time in the node's launch -/// process. 
-/// -/// ## Flow -/// -/// The [NodeBuilder] is intended to sit behind a CLI that provides the necessary [NodeConfig] -/// input: [NodeBuilder::new] -/// -/// From there the builder is configured with the node's types, components, and hooks, then launched -/// with the [NodeBuilder::launch] method. On launch all the builtin internals, such as the -/// `Database` and its providers [BlockchainProvider] are initialized before the configured -/// [NodeComponentsBuilder] is invoked with the [BuilderContext] to create the transaction pool, -/// network, and payload builder components. When the RPC is configured, the corresponding hooks are -/// invoked to allow for custom rpc modules to be injected into the rpc server: -/// [NodeBuilder::extend_rpc_modules] -/// -/// Finally all components are created and all services are launched and a [NodeHandle] is returned -/// that can be used to interact with the node: [FullNode] -/// -/// The following diagram shows the flow of the node builder from CLI to a launched node. -/// -/// include_mmd!("docs/mermaid/builder.mmd") -/// -/// ## Internals -/// -/// The node builder is fully type safe, it uses the [NodeTypes] trait to enforce that all -/// components are configured with the correct types. However the database types and with that the -/// provider trait implementations are currently created by the builder itself during the launch -/// process, hence the database type is not part of the [NodeTypes] trait and the node's components, -/// that depend on the database, are configured separately. In order to have a nice trait that -/// encapsulates the entire node the [FullNodeComponents] trait was introduced. This trait has -/// convenient associated types for all the components of the node. After [NodeBuilder::launch] the -/// [NodeHandle] contains an instance of [FullNode] that implements the [FullNodeComponents] trait -/// and has access to all the components of the node. 
Internally the node builder uses several -/// generic adapter types that are then map to traits with associated types for ease of use. -/// -/// ### Limitations -/// -/// Currently the launch process is limited to ethereum nodes and requires all the components -/// specified above. It also expect beacon consensus with the ethereum engine API that is configured -/// by the builder itself during launch. This might change in the future. -/// -/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html -pub struct NodeBuilder { - /// All settings for how the node should be configured. - config: NodeConfig, - /// State of the node builder process. - state: State, - /// The configured database for the node. - database: DB, -} - -impl NodeBuilder { - /// Returns a reference to the node builder's config. - pub fn config(&self) -> &NodeConfig { - &self.config - } - - /// Loads the reth config with the given datadir root - fn load_config(&self, data_dir: &ChainPath) -> eyre::Result { - let config_path = self.config.config.clone().unwrap_or_else(|| data_dir.config_path()); - - let mut config = confy::load_path::(&config_path) - .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; - - info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); - - // Update the config with the command line arguments - config.peers.trusted_nodes_only = self.config.network.trusted_only; - - if !self.config.network.trusted_peers.is_empty() { - info!(target: "reth::cli", "Adding trusted nodes"); - self.config.network.trusted_peers.iter().for_each(|peer| { - config.peers.trusted_nodes.insert(*peer); - }); - } - - Ok(config) - } -} - -impl NodeBuilder<(), InitState> { - /// Create a new [`NodeBuilder`]. - pub fn new(config: NodeConfig) -> Self { - Self { config, database: (), state: InitState::default() } - } -} - -impl NodeBuilder { - /// Configures the underlying database that the node will use. 
- pub fn with_database(self, database: D) -> NodeBuilder { - NodeBuilder { config: self.config, state: self.state, database } - } - - /// Preconfigure the builder with the context to launch the node. - /// - /// This provides the task executor and the data directory for the node. - pub fn with_launch_context( - self, - task_executor: TaskExecutor, - data_dir: ChainPath, - ) -> WithLaunchContext { - WithLaunchContext { builder: self, task_executor, data_dir } - } - - /// Creates an _ephemeral_ preconfigured node for testing purposes. - pub fn testing_node( - self, - task_executor: TaskExecutor, - ) -> WithLaunchContext>, InitState> { - let db = create_test_rw_db(); - let db_path_str = db.path().to_str().expect("Path is not valid unicode"); - let path = - MaybePlatformPath::::from_str(db_path_str).expect("Path is not valid"); - let data_dir = path.unwrap_or_chain_default(self.config.chain.chain); - - WithLaunchContext { builder: self.with_database(db), task_executor, data_dir } - } -} - -impl NodeBuilder -where - DB: Database + Unpin + Clone + 'static, -{ - /// Configures the types of the node. - pub fn with_types(self, types: T) -> NodeBuilder> - where - T: NodeTypes, - { - NodeBuilder { - config: self.config, - state: TypesState { adapter: FullNodeTypesAdapter::new(types) }, - database: self.database, - } - } - - /// Preconfigures the node with a specific node implementation. - /// - /// This is a convenience method that sets the node's types and components in one call. 
- pub fn node( - self, - node: N, - ) -> NodeBuilder< - DB, - ComponentsState< - N, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - FullNodeComponentsAdapter< - RethFullAdapter, - >>::Pool, - >, - >, - > - where - N: Node>>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, - { - self.with_types(node.clone()).with_components(node.components()) - } -} - -impl NodeBuilder> -where - Types: NodeTypes, - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the node's components. - pub fn with_components( - self, - components_builder: Components, - ) -> NodeBuilder< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > - where - Components: - NodeComponentsBuilder>>, - { - NodeBuilder { - config: self.config, - database: self.database, - state: ComponentsState { - types: self.state.adapter.types, - components_builder, - hooks: NodeHooks::new(), - rpc: RpcHooks::new(), - exexs: Vec::new(), - }, - } - } -} - -impl - NodeBuilder< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - Types: NodeTypes, - Components: NodeComponentsBuilder>>, -{ - /// Apply a function to the components builder. 
- pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { - Self { - config: self.config, - database: self.database, - state: ComponentsState { - types: self.state.types, - components_builder: f(self.state.components_builder), - hooks: self.state.hooks, - rpc: self.state.rpc, - exexs: self.state.exexs, - }, - } - } - - /// Sets the hook that is run once the node's components are initialized. - pub fn on_component_initialized(mut self, hook: F) -> Self - where - F: Fn( - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.hooks.set_on_component_initialized(hook); - self - } - - /// Sets the hook that is run once the node has started. - pub fn on_node_started(mut self, hook: F) -> Self - where - F: Fn( - FullNode< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.hooks.set_on_node_started(hook); - self - } - - /// Sets the hook that is run once the rpc server is started. - pub fn on_rpc_started(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - RethRpcServerHandles, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.rpc.set_on_rpc_started(hook); - self - } - - /// Sets the hook that is run to configure the rpc modules. - pub fn extend_rpc_modules(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.rpc.set_extend_rpc_modules(hook); - self - } - - /// Installs an ExEx (Execution Extension) in the node. - /// - /// # Note - /// - /// The ExEx ID must be unique. 
- pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self - where - F: Fn( - ExExContext< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> R - + Send - + 'static, - R: Future> + Send, - E: Future> + Send, - { - self.state.exexs.push((exex_id.into(), Box::new(exex))); - self - } - - /// Launches the node and returns a handle to it. - /// - /// This bootstraps the node internals, creates all the components with the provider - /// [NodeComponentsBuilder] and launches the node. - /// - /// Returns a [NodeHandle] that can be used to interact with the node. - pub async fn launch( - self, - executor: TaskExecutor, - data_dir: ChainPath, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > { - // get config from file - let reth_config = self.load_config(&data_dir)?; - - let Self { - config, - state: ComponentsState { types, components_builder, hooks, rpc, exexs: _ }, - database, - } = self; - - // Raise the fd limit of the process. - // Does not do anything on windows. - fdlimit::raise_fd_limit()?; - - // Limit the global rayon thread pool, reserving 2 cores for the rest of the system - let _ = ThreadPoolBuilder::new() - .num_threads( - available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), - ) - .build_global() - .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); - - let provider_factory = ProviderFactory::new( - database.clone(), - Arc::clone(&config.chain), - data_dir.static_files_path(), - )? 
- .with_static_files_metrics(); - info!(target: "reth::cli", "Database opened"); - - let prometheus_handle = config.install_prometheus_recorder()?; - config - .start_metrics_endpoint( - prometheus_handle, - database.clone(), - provider_factory.static_file_provider(), - executor.clone(), - ) - .await?; - - debug!(target: "reth::cli", chain=%config.chain.chain, genesis=?config.chain.genesis_hash(), "Initializing genesis"); - - let genesis_hash = init_genesis(provider_factory.clone())?; - - info!(target: "reth::cli", "\n{}", config.chain.display_hardforks()); - - // setup the consensus instance - let consensus: Arc = if config.dev.dev { - Arc::new(AutoSealConsensus::new(Arc::clone(&config.chain))) - } else { - Arc::new(BeaconConsensus::new(Arc::clone(&config.chain))) - }; - - debug!(target: "reth::cli", "Spawning stages metrics listener task"); - let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); - let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); - executor.spawn_critical("stages metrics listener task", sync_metrics_listener); - - let prune_config = config.prune_config()?.or_else(|| reth_config.prune.clone()); - - // Configure the blockchain tree for the node - let evm_config = types.evm_config(); - let tree_config = BlockchainTreeConfig::default(); - let tree_externals = TreeExternals::new( - provider_factory.clone(), - consensus.clone(), - EvmProcessorFactory::new(config.chain.clone(), evm_config.clone()), - ); - let tree = BlockchainTree::new( - tree_externals, - tree_config, - prune_config.as_ref().map(|config| config.segments.clone()), - )? 
- .with_sync_metrics_tx(sync_metrics_tx.clone()); - - let canon_state_notification_sender = tree.canon_state_notification_sender(); - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - debug!(target: "reth::cli", "configured blockchain tree"); - - // fetch the head block from the database - let head = - config.lookup_head(provider_factory.clone()).wrap_err("the head block is missing")?; - - // setup the blockchain provider - let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; - - let ctx = BuilderContext::new( - head, - blockchain_db, - executor, - data_dir, - config, - reth_config, - evm_config.clone(), - ); - - debug!(target: "reth::cli", "creating components"); - let NodeComponents { transaction_pool, network, payload_builder } = - components_builder.build_components(&ctx).await?; - - let BuilderContext { - provider: blockchain_db, - executor, - data_dir, - mut config, - mut reth_config, - .. - } = ctx; - - let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; - - let node_components = FullNodeComponentsAdapter { - evm_config: evm_config.clone(), - pool: transaction_pool.clone(), - network: network.clone(), - provider: blockchain_db.clone(), - payload_builder: payload_builder.clone(), - executor: executor.clone(), - }; - debug!(target: "reth::cli", "calling on_component_initialized hook"); - on_component_initialized.on_event(node_components.clone())?; - - // spawn exexs - let mut exex_handles = Vec::with_capacity(self.state.exexs.len()); - let mut exexs = Vec::with_capacity(self.state.exexs.len()); - for (id, exex) in self.state.exexs { - // create a new exex handle - let (handle, events, notifications) = ExExHandle::new(id.clone()); - exex_handles.push(handle); - - // create the launch context for the exex - let context = ExExContext { - head, - provider: blockchain_db.clone(), - task_executor: executor.clone(), - data_dir: data_dir.clone(), - config: config.clone(), - reth_config: reth_config.clone(), - pool: transaction_pool.clone(), - events, - notifications, - }; - - let executor = executor.clone(); - exexs.push(async move { - debug!(target: "reth::cli", id, "spawning exex"); - let span = reth_tracing::tracing::info_span!("exex", id); - let _enter = span.enter(); - - // init the exex - let exex = exex.launch(context).await.unwrap(); - - // spawn it as a crit task - executor.spawn_critical("exex", async move { - info!(target: "reth::cli", "ExEx started"); - match exex.await { - Ok(_) => panic!("ExEx {id} finished. 
ExEx's should run indefinitely"), - Err(err) => panic!("ExEx {id} crashed: {err}"), - } - }); - }); - } - - future::join_all(exexs).await; - - // spawn exex manager - let exex_manager_handle = if !exex_handles.is_empty() { - debug!(target: "reth::cli", "spawning exex manager"); - // todo(onbjerg): rm magic number - let exex_manager = ExExManager::new(exex_handles, 1024); - let exex_manager_handle = exex_manager.handle(); - executor.spawn_critical("exex manager", async move { - exex_manager.await.expect("exex manager crashed"); - }); - - // send notifications from the blockchain tree to exex manager - let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); - let mut handle = exex_manager_handle.clone(); - executor.spawn_critical("exex manager blockchain tree notifications", async move { - while let Ok(notification) = canon_state_notifications.recv().await { - handle - .send_async(notification.into()) - .await - .expect("blockchain tree notification could not be sent to exex manager"); - } - }); - - info!(target: "reth::cli", "ExEx Manager started"); - - Some(exex_manager_handle) - } else { - None - }; - - // create pipeline - let network_client = network.fetch_client().await?; - let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); - - if let Some(skip_fcu_threshold) = config.debug.skip_fcu { - debug!(target: "reth::cli", "spawning skip FCU task"); - let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); - let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); - executor.spawn_critical( - "skip FCU interceptor", - engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), - ); - consensus_engine_rx = skip_fcu_rx; - } - - if let Some(store_path) = config.debug.engine_api_store.clone() { - debug!(target: "reth::cli", "spawning engine API store"); - let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); - let engine_api_store = EngineApiStore::new(store_path); - executor.spawn_critical( - "engine 
api interceptor", - engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), - ); - consensus_engine_rx = engine_intercept_rx; - }; - - let max_block = config.max_block(&network_client, provider_factory.clone()).await?; - let mut hooks = EngineHooks::new(); - - let static_file_producer = StaticFileProducer::new( - provider_factory.clone(), - provider_factory.static_file_provider(), - prune_config.clone().unwrap_or_default().segments, - ); - let static_file_producer_events = static_file_producer.lock().events(); - hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone()))); - info!(target: "reth::cli", "StaticFileProducer initialized"); - - // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to - if reth_config.stages.etl.dir.is_none() { - reth_config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); - } - - // Configure the pipeline - let pipeline_exex_handle = - exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (mut pipeline, client) = if config.dev.dev { - info!(target: "reth::cli", "Starting Reth in dev mode"); - - for (idx, (address, alloc)) in config.chain.genesis.alloc.iter().enumerate() { - info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); - } - - // install auto-seal - let pending_transactions_listener = transaction_pool.pending_transactions_listener(); - - let mining_mode = if let Some(interval) = config.dev.block_time { - MiningMode::interval(interval) - } else if let Some(max_transactions) = config.dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) - } else { - info!(target: "reth::cli", "No mining mode specified, defaulting to ReadyTransaction"); - MiningMode::instant(1, pending_transactions_listener) - }; - - let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( - Arc::clone(&config.chain), - blockchain_db.clone(), - transaction_pool.clone(), - consensus_engine_tx.clone(), - canon_state_notification_sender, - mining_mode, - evm_config.clone(), - ) - .build(); - - let mut pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, - client.clone(), - Arc::clone(&consensus), - provider_factory.clone(), - &executor, - sync_metrics_tx, - prune_config.clone(), - max_block, - static_file_producer, - evm_config, - pipeline_exex_handle, - ) - .await?; - - let pipeline_events = pipeline.events(); - task.set_pipeline_events(pipeline_events); - debug!(target: "reth::cli", "Spawning auto mine task"); - executor.spawn(Box::pin(task)); - - (pipeline, EitherDownloader::Left(client)) - } else { - let pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, - network_client.clone(), - Arc::clone(&consensus), - provider_factory.clone(), - &executor, - sync_metrics_tx, - prune_config.clone(), - max_block, - static_file_producer, - evm_config, - pipeline_exex_handle, - ) - .await?; - - (pipeline, EitherDownloader::Right(network_client)) - }; - - let pipeline_events = pipeline.events(); - - let initial_target = config.initial_pipeline_target(genesis_hash); - - let prune_config = prune_config.unwrap_or_default(); - let mut 
pruner_builder = PrunerBuilder::new(prune_config.clone()) - .max_reorg_depth(tree_config.max_reorg_depth() as usize) - .prune_delete_limit(config.chain.prune_delete_limit) - .timeout(PrunerBuilder::DEFAULT_TIMEOUT); - if let Some(exex_manager_handle) = &exex_manager_handle { - pruner_builder = - pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); - } - - let mut pruner = pruner_builder.build(provider_factory.clone()); - - let pruner_events = pruner.events(); - hooks.add(PruneHook::new(pruner, Box::new(executor.clone()))); - info!(target: "reth::cli", ?prune_config, "Pruner initialized"); - - // Configure the consensus engine - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( - client, - pipeline, - blockchain_db.clone(), - Box::new(executor.clone()), - Box::new(network.clone()), - max_block, - config.debug.continuous, - payload_builder.clone(), - initial_target, - reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, - consensus_engine_tx, - consensus_engine_rx, - hooks, - )?; - info!(target: "reth::cli", "Consensus engine initialized"); - - let events = stream_select!( - network.event_listener().map(Into::into), - beacon_engine_handle.event_listener().map(Into::into), - pipeline_events.map(Into::into), - if config.debug.tip.is_none() && !config.dev.dev { - Either::Left( - ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) - .map(Into::into), - ) - } else { - Either::Right(stream::empty()) - }, - pruner_events.map(Into::into), - static_file_producer_events.map(Into::into) - ); - executor.spawn_critical( - "events task", - node::handle_events(Some(network.clone()), Some(head.number), events, database.clone()), - ); - - let engine_api = EngineApi::new( - blockchain_db.clone(), - config.chain.clone(), - beacon_engine_handle, - payload_builder.into(), - Box::new(executor.clone()), - ); - info!(target: "reth::cli", "Engine API handler initialized"); - - // extract the jwt secret from the args 
if possible - let default_jwt_path = data_dir.jwt_path(); - let jwt_secret = config.rpc.auth_jwt_secret(default_jwt_path)?; - - // adjust rpc port numbers based on instance number - config.adjust_instance_ports(); - - // Start RPC servers - - let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( - node_components.clone(), - engine_api, - &config, - jwt_secret, - rpc, - ) - .await?; - - // in dev mode we generate 20 random dev-signer accounts - if config.dev.dev { - rpc_registry.eth_api().with_dev_accounts(); - } - - // Run consensus engine to completion - let (tx, rx) = oneshot::channel(); - info!(target: "reth::cli", "Starting consensus engine"); - executor.spawn_critical_blocking("consensus engine", async move { - let res = beacon_consensus_engine.await; - let _ = tx.send(res); - }); - - let FullNodeComponentsAdapter { - evm_config, - pool, - network, - provider, - payload_builder, - executor, - } = node_components; - - let full_node = FullNode { - evm_config, - pool, - network, - provider, - payload_builder, - task_executor: executor, - rpc_server_handles, - rpc_registry, - config, - data_dir, - }; - // Notify on node started - on_node_started.on_event(full_node.clone())?; - - let handle = NodeHandle { - node_exit_future: NodeExitFuture::new(rx, full_node.config.debug.terminate), - node: full_node, - }; - - Ok(handle) - } - - /// Check that the builder can be launched - /// - /// This is useful when writing tests to ensure that the builder is configured correctly. - pub fn check_launch(self) -> Self { - self - } -} - -/// A [NodeBuilder] with it's launch context already configured. -/// -/// This exposes the same methods as [NodeBuilder] but with the launch context already configured, -/// See [WithLaunchContext::launch] -pub struct WithLaunchContext { - builder: NodeBuilder, - task_executor: TaskExecutor, - data_dir: ChainPath, -} - -impl WithLaunchContext { - /// Returns a reference to the node builder's config. 
- pub fn config(&self) -> &NodeConfig { - self.builder.config() - } - - /// Returns a reference to the task executor. - pub fn task_executor(&self) -> &TaskExecutor { - &self.task_executor - } - - /// Returns a reference to the data directory. - pub fn data_dir(&self) -> &ChainPath { - &self.data_dir - } -} - -impl WithLaunchContext -where - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the types of the node. - pub fn with_types(self, types: T) -> WithLaunchContext> - where - T: NodeTypes, - { - WithLaunchContext { - builder: self.builder.with_types(types), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } - - /// Preconfigures the node with a specific node implementation. - pub fn node( - self, - node: N, - ) -> WithLaunchContext< - DB, - ComponentsState< - N, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - FullNodeComponentsAdapter< - RethFullAdapter, - >>::Pool, - >, - >, - > - where - N: Node>>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, - { - self.with_types(node.clone()).with_components(node.components()) - } -} - -impl WithLaunchContext -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, -{ - /// Launches a preconfigured [Node] - /// - /// This bootstraps the node internals, creates all the components with the given [Node] type - /// and launches the node. - /// - /// Returns a [NodeHandle] that can be used to interact with the node. 
- pub async fn launch_node( - self, - node: N, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - RethFullAdapter, - >>::Pool, - >, - >, - > - where - N: Node>>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, - { - self.node(node).launch().await - } -} - -impl WithLaunchContext> -where - Types: NodeTypes, - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the node's components. - /// - /// The given components builder is used to create the components of the node when it is - /// launched. - pub fn with_components( - self, - components_builder: Components, - ) -> WithLaunchContext< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > - where - Components: - NodeComponentsBuilder>>, - { - WithLaunchContext { - builder: self.builder.with_components(components_builder), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } -} - -impl - WithLaunchContext< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - Types: NodeTypes, - Components: NodeComponentsBuilder>>, -{ - /// Apply a function to the components builder. - pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { - Self { - builder: self.builder.map_components(f), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } - - /// Sets the hook that is run once the node's components are initialized. 
- pub fn on_component_initialized(mut self, hook: F) -> Self - where - F: Fn( - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.hooks.set_on_component_initialized(hook); - self - } - - /// Sets the hook that is run once the node has started. - pub fn on_node_started(mut self, hook: F) -> Self - where - F: Fn( - FullNode< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.hooks.set_on_node_started(hook); - self - } - - /// Sets the hook that is run once the rpc server is started. - pub fn on_rpc_started(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - RethRpcServerHandles, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.rpc.set_on_rpc_started(hook); - self - } - - /// Sets the hook that is run to configure the rpc modules. - pub fn extend_rpc_modules(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.rpc.set_extend_rpc_modules(hook); - self - } - - /// Installs an ExEx (Execution Extension) in the node. - pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self - where - F: Fn( - ExExContext< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> R - + Send - + 'static, - R: Future> + Send, - E: Future> + Send, - { - self.builder.state.exexs.push((exex_id.into(), Box::new(exex))); - self - } - - /// Launches the node and returns a handle to it. 
- pub async fn launch( - self, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > { - let Self { builder, task_executor, data_dir } = self; - - builder.launch(task_executor, data_dir).await - } - - /// Check that the builder can be launched - /// - /// This is useful when writing tests to ensure that the builder is configured correctly. - pub fn check_launch(self) -> Self { - self - } -} - -/// Captures the necessary context for building the components of the node. -pub struct BuilderContext { - /// The current head of the blockchain at launch. - head: Head, - /// The configured provider to interact with the blockchain. - provider: Node::Provider, - /// The executor of the node. - executor: TaskExecutor, - /// The data dir of the node. - data_dir: ChainPath, - /// The config of the node - config: NodeConfig, - /// loaded config - reth_config: reth_config::Config, - /// EVM config of the node - evm_config: Node::Evm, -} - -impl BuilderContext { - /// Create a new instance of [BuilderContext] - pub fn new( - head: Head, - provider: Node::Provider, - executor: TaskExecutor, - data_dir: ChainPath, - config: NodeConfig, - reth_config: reth_config::Config, - evm_config: Node::Evm, - ) -> Self { - Self { head, provider, executor, data_dir, config, reth_config, evm_config } - } - - /// Returns the configured provider to interact with the blockchain. - pub fn provider(&self) -> &Node::Provider { - &self.provider - } - - /// Returns the configured evm. - pub fn evm_config(&self) -> &Node::Evm { - &self.evm_config - } - - /// Returns the current head of the blockchain at launch. - pub fn head(&self) -> Head { - self.head - } - - /// Returns the config of the node. - pub fn config(&self) -> &NodeConfig { - &self.config - } - - /// Returns the data dir of the node. - /// - /// This gives access to all relevant files and directories of the node's datadir. 
- pub fn data_dir(&self) -> &ChainPath { - &self.data_dir - } - - /// Returns the executor of the node. - /// - /// This can be used to execute async tasks or functions during the setup. - pub fn task_executor(&self) -> &TaskExecutor { - &self.executor - } - - /// Returns the chain spec of the node. - pub fn chain_spec(&self) -> Arc { - self.provider().chain_spec() - } - - /// Returns the transaction pool config of the node. - pub fn pool_config(&self) -> PoolConfig { - self.config().txpool.pool_config() - } - - /// Loads `MAINNET_KZG_TRUSTED_SETUP`. - pub fn kzg_settings(&self) -> eyre::Result> { - Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) - } - - /// Returns the config for payload building. - pub fn payload_builder_config(&self) -> impl PayloadBuilderConfig { - self.config.builder.clone() - } - - /// Returns the default network config for the node. - pub fn network_config(&self) -> eyre::Result> { - self.config.network_config( - &self.reth_config, - self.provider.clone(), - self.executor.clone(), - self.head, - self.data_dir(), - ) - } - - /// Creates the [NetworkBuilder] for the node. - pub async fn network_builder(&self) -> eyre::Result> { - self.config - .build_network( - &self.reth_config, - self.provider.clone(), - self.executor.clone(), - self.head, - self.data_dir(), - ) - .await - } - - /// Convenience function to start the network. - /// - /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected - /// to that network. 
- pub fn start_network( - &self, - builder: NetworkBuilder, - pool: Pool, - ) -> NetworkHandle - where - Pool: TransactionPool + Unpin + 'static, - { - let (handle, network, txpool, eth) = builder - .transactions(pool, Default::default()) - .request_handler(self.provider().clone()) - .split_with_handle(); - - self.executor.spawn_critical("p2p txpool", txpool); - self.executor.spawn_critical("p2p eth request handler", eth); - - let default_peers_path = self.data_dir().known_peers_path(); - let known_peers_file = self.config.network.persistent_peers_file(default_peers_path); - self.executor.spawn_critical_with_graceful_shutdown_signal( - "p2p network task", - |shutdown| { - network.run_until_graceful_shutdown(shutdown, |network| { - write_peers_to_file(network, known_peers_file) - }) - }, - ); - - handle - } -} - -impl std::fmt::Debug for BuilderContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("BuilderContext") - .field("head", &self.head) - .field("provider", &std::any::type_name::()) - .field("executor", &self.executor) - .field("data_dir", &self.data_dir) - .field("config", &self.config) - .finish() - } -} - -/// The initial state of the node builder process. -#[derive(Debug, Default)] -#[non_exhaustive] -pub struct InitState; - -/// The state after all types of the node have been configured. -pub struct TypesState -where - DB: Database + Clone + 'static, - Types: NodeTypes, -{ - adapter: FullNodeTypesAdapter>, -} - -/// The state of the node builder process after the node's components have been configured. -/// -/// With this state all types and components of the node are known and the node can be launched. -/// -/// Additionally, this state captures additional hooks that are called at specific points in the -/// node's launch lifecycle. -pub struct ComponentsState { - /// The types of the node. - types: Types, - /// Type that builds the components of the node. 
- components_builder: Components, - /// Additional NodeHooks that are called at specific points in the node's launch lifecycle. - hooks: NodeHooks, - /// Additional RPC hooks. - rpc: RpcHooks, - /// The ExExs (execution extensions) of the node. - exexs: Vec<(String, Box>)>, -} - -impl std::fmt::Debug - for ComponentsState -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ComponentsState") - .field("types", &std::any::type_name::()) - .field("components_builder", &std::any::type_name::()) - .field("hooks", &self.hooks) - .field("rpc", &self.rpc) - .field("exexs", &self.exexs.len()) - .finish() - } -} diff --git a/crates/node-builder/src/builder/mod.rs b/crates/node-builder/src/builder/mod.rs new file mode 100644 index 000000000..44bb60588 --- /dev/null +++ b/crates/node-builder/src/builder/mod.rs @@ -0,0 +1,619 @@ +//! Customizable node builder. + +#![allow(clippy::type_complexity, missing_debug_implementations)] + +use crate::{ + components::{Components, ComponentsBuilder, NodeComponentsBuilder, PoolBuilder}, + node::FullNode, + rpc::{RethRpcServerHandles, RpcContext}, + DefaultNodeLauncher, Node, NodeHandle, +}; +use futures::Future; +use reth_db::{ + database::Database, + database_metrics::{DatabaseMetadata, DatabaseMetrics}, + test_utils::{create_test_rw_db, TempDatabase}, + DatabaseEnv, +}; +use reth_exex::ExExContext; +use reth_network::{NetworkBuilder, NetworkConfig, NetworkHandle}; +use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; +use reth_node_core::{ + cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, + dirs::{ChainPath, DataDirPath, MaybePlatformPath}, + node_config::NodeConfig, + primitives::{kzg::KzgSettings, Head}, + utils::write_peers_to_file, +}; +use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, ChainSpec}; +use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; +use reth_tasks::TaskExecutor; +use reth_transaction_pool::{PoolConfig, 
TransactionPool}; +pub use states::*; +use std::{str::FromStr, sync::Arc}; + +mod states; + +/// The builtin provider type of the reth node. +// Note: we need to hardcode this because custom components might depend on it in associated types. +pub type RethFullProviderType = BlockchainProvider; + +/// The adapter type for a reth node with the given types +pub type RethFullAdapter = FullNodeTypesAdapter>; + +#[cfg_attr(doc, aquamarine::aquamarine)] +/// Declaratively construct a node. +/// +/// [`NodeBuilder`] provides a [builder-like interface][builder] for composing +/// components of a node. +/// +/// ## Order +/// +/// Configuring a node starts out with a [`NodeConfig`] (this can be obtained from cli arguments for +/// example) and then proceeds to configure the core static types of the node: [NodeTypes], these +/// include the node's primitive types and the node's engine types. +/// +/// Next all stateful components of the node are configured, these include the +/// [ConfigureEvm](reth_node_api::evm::ConfigureEvm), the database [Database] and all the +/// components of the node that are downstream of those types, these include: +/// +/// - The transaction pool: [PoolBuilder] +/// - The network: [NetworkBuilder](crate::components::NetworkBuilder) +/// - The payload builder: [PayloadBuilder](crate::components::PayloadServiceBuilder) +/// +/// Once all the components are configured, the node is ready to be launched. +/// +/// On launch the builder returns a fully type aware [NodeHandle] that has access to all the +/// configured components and can interact with the node. +/// +/// There are convenience functions for networks that come with a preset of types and components via +/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_node_optimism::OptimismNode`. +/// +/// The [NodeBuilder::node] function configures the node's types and components in one step. 
+/// +/// ## Components +/// +/// All components are configured with a [NodeComponentsBuilder] that is responsible for actually +/// creating the node components during the launch process. The [ComponentsBuilder] is a general +/// purpose implementation of the [NodeComponentsBuilder] trait that can be used to configure the +/// network, transaction pool and payload builder of the node. It enforces the correct order of +/// configuration, for example the network and the payload builder depend on the transaction pool +/// type that is configured first. +/// +/// All builder traits are generic over the node types and are invoked with the [BuilderContext] +/// that gives access to internals of the that are needed to configure the components. This include +/// the original config, chain spec, the database provider and the task executor, +/// +/// ## Hooks +/// +/// Once all the components are configured, the builder can be used to set hooks that are run at +/// specific points in the node's lifecycle. This way custom services can be spawned before the node +/// is launched [NodeBuilder::on_component_initialized], or once the rpc server(s) are launched +/// [NodeBuilder::on_rpc_started]. The [NodeBuilder::extend_rpc_modules] can be used to inject +/// custom rpc modules into the rpc server before it is launched. See also [RpcContext] +/// All hooks accept a closure that is then invoked at the appropriate time in the node's launch +/// process. +/// +/// ## Flow +/// +/// The [NodeBuilder] is intended to sit behind a CLI that provides the necessary [NodeConfig] +/// input: [NodeBuilder::new] +/// +/// From there the builder is configured with the node's types, components, and hooks, then launched +/// with the [NodeBuilder::launch] method. 
On launch all the builtin internals, such as the +/// `Database` and its providers [BlockchainProvider] are initialized before the configured +/// [NodeComponentsBuilder] is invoked with the [BuilderContext] to create the transaction pool, +/// network, and payload builder components. When the RPC is configured, the corresponding hooks are +/// invoked to allow for custom rpc modules to be injected into the rpc server: +/// [NodeBuilder::extend_rpc_modules] +/// +/// Finally all components are created and all services are launched and a [NodeHandle] is returned +/// that can be used to interact with the node: [FullNode] +/// +/// The following diagram shows the flow of the node builder from CLI to a launched node. +/// +/// include_mmd!("docs/mermaid/builder.mmd") +/// +/// ## Internals +/// +/// The node builder is fully type safe, it uses the [NodeTypes] trait to enforce that all +/// components are configured with the correct types. However the database types and with that the +/// provider trait implementations are currently created by the builder itself during the launch +/// process, hence the database type is not part of the [NodeTypes] trait and the node's components, +/// that depend on the database, are configured separately. In order to have a nice trait that +/// encapsulates the entire node the [FullNodeComponents] trait was introduced. This trait has +/// convenient associated types for all the components of the node. After [NodeBuilder::launch] the +/// [NodeHandle] contains an instance of [FullNode] that implements the [FullNodeComponents] trait +/// and has access to all the components of the node. Internally the node builder uses several +/// generic adapter types that are then map to traits with associated types for ease of use. +/// +/// ### Limitations +/// +/// Currently the launch process is limited to ethereum nodes and requires all the components +/// specified above. 
It also expect beacon consensus with the ethereum engine API that is configured +/// by the builder itself during launch. This might change in the future. +/// +/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html +pub struct NodeBuilder { + /// All settings for how the node should be configured. + config: NodeConfig, + /// The configured database for the node. + database: DB, +} + +impl NodeBuilder<()> { + /// Create a new [`NodeBuilder`]. + pub fn new(config: NodeConfig) -> Self { + Self { config, database: () } + } +} + +impl NodeBuilder { + /// Returns a reference to the node builder's config. + pub fn config(&self) -> &NodeConfig { + &self.config + } + + /// Configures the underlying database that the node will use. + pub fn with_database(self, database: D) -> NodeBuilder { + NodeBuilder { config: self.config, database } + } + + /// Preconfigure the builder with the context to launch the node. + /// + /// This provides the task executor and the data directory for the node. + pub fn with_launch_context( + self, + task_executor: TaskExecutor, + data_dir: ChainPath, + ) -> WithLaunchContext> { + WithLaunchContext { builder: self, task_executor, data_dir } + } + + /// Creates an _ephemeral_ preconfigured node for testing purposes. + pub fn testing_node( + self, + task_executor: TaskExecutor, + ) -> WithLaunchContext>>> { + let db = create_test_rw_db(); + let db_path_str = db.path().to_str().expect("Path is not valid unicode"); + let path = + MaybePlatformPath::::from_str(db_path_str).expect("Path is not valid"); + let data_dir = path.unwrap_or_chain_default(self.config.chain.chain); + + WithLaunchContext { builder: self.with_database(db), task_executor, data_dir } + } +} + +impl NodeBuilder +where + DB: Database + Unpin + Clone + 'static, +{ + /// Configures the types of the node. 
+ pub fn with_types(self, types: T) -> NodeBuilderWithTypes> + where + T: NodeTypes, + { + let types = FullNodeTypesAdapter::new(types); + NodeBuilderWithTypes::new(self.config, types, self.database) + } + + /// Preconfigures the node with a specific node implementation. + /// + /// This is a convenience method that sets the node's types and components in one call. + pub fn node( + self, + node: N, + ) -> NodeBuilderWithComponents< + RethFullAdapter, + ComponentsBuilder< + RethFullAdapter, + N::PoolBuilder, + N::PayloadBuilder, + N::NetworkBuilder, + >, + > + where + N: Node>, + N::PoolBuilder: PoolBuilder>, + N::NetworkBuilder: crate::components::NetworkBuilder< + RethFullAdapter, + >>::Pool, + >, + N::PayloadBuilder: crate::components::PayloadServiceBuilder< + RethFullAdapter, + >>::Pool, + >, + { + self.with_types(node.clone()).with_components(node.components()) + } +} + +/// A [NodeBuilder] with it's launch context already configured. +/// +/// This exposes the same methods as [NodeBuilder] but with the launch context already configured, +/// See [WithLaunchContext::launch] +pub struct WithLaunchContext { + builder: Builder, + task_executor: TaskExecutor, + data_dir: ChainPath, +} + +impl WithLaunchContext { + /// Returns a reference to the task executor. + pub fn task_executor(&self) -> &TaskExecutor { + &self.task_executor + } + + /// Returns a reference to the data directory. + pub fn data_dir(&self) -> &ChainPath { + &self.data_dir + } +} + +impl WithLaunchContext> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, +{ + /// Configures the types of the node. + pub fn with_types( + self, + types: T, + ) -> WithLaunchContext>> + where + T: NodeTypes, + { + WithLaunchContext { + builder: self.builder.with_types(types), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Preconfigures the node with a specific node implementation. 
+ pub fn node( + self, + node: N, + ) -> WithLaunchContext< + NodeBuilderWithComponents< + RethFullAdapter, + ComponentsBuilder< + RethFullAdapter, + N::PoolBuilder, + N::PayloadBuilder, + N::NetworkBuilder, + >, + >, + > + where + N: Node>, + N::PoolBuilder: PoolBuilder>, + N::NetworkBuilder: crate::components::NetworkBuilder< + RethFullAdapter, + >>::Pool, + >, + N::PayloadBuilder: crate::components::PayloadServiceBuilder< + RethFullAdapter, + >>::Pool, + >, + { + self.with_types(node.clone()).with_components(node.components()) + } + + /// Launches a preconfigured [Node] + /// + /// This bootstraps the node internals, creates all the components with the given [Node] + /// + /// Returns a [NodeHandle] that can be used to interact with the node. + pub async fn launch_node( + self, + node: N, + ) -> eyre::Result< + NodeHandle< + NodeAdapter< + RethFullAdapter, + Components< + RethFullAdapter, + >>::Pool, + >, + >, + >, + > + where + N: Node>, + N::PoolBuilder: PoolBuilder>, + N::NetworkBuilder: crate::components::NetworkBuilder< + RethFullAdapter, + >>::Pool, + >, + N::PayloadBuilder: crate::components::PayloadServiceBuilder< + RethFullAdapter, + >>::Pool, + >, + { + self.node(node).launch().await + } +} + +impl WithLaunchContext>> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, +{ + /// Advances the state of the node builder to the next state where all components are configured + pub fn with_components( + self, + components_builder: CB, + ) -> WithLaunchContext, CB>> + where + CB: NodeComponentsBuilder>, + { + WithLaunchContext { + builder: self.builder.with_components(components_builder), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } +} + +impl WithLaunchContext, CB>> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, + CB: NodeComponentsBuilder>, +{ + /// Sets the hook that is run once the node's components are initialized. 
+ pub fn on_component_initialized(self, hook: F) -> Self + where + F: Fn(NodeAdapter, CB::Components>) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_component_initialized(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run once the node has started. + pub fn on_node_started(self, hook: F) -> Self + where + F: Fn(FullNode, CB::Components>>) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_node_started(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(self, hook: F) -> Self + where + F: Fn( + RpcContext<'_, NodeAdapter, CB::Components>>, + RethRpcServerHandles, + ) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_rpc_started(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(self, hook: F) -> Self + where + F: Fn( + RpcContext<'_, NodeAdapter, CB::Components>>, + ) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.extend_rpc_modules(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Installs an ExEx (Execution Extension) in the node. + /// + /// # Note + /// + /// The ExEx ID must be unique. + pub fn install_exex(self, exex_id: impl Into, exex: F) -> Self + where + F: Fn(ExExContext, CB::Components>>) -> R + + Send + + 'static, + R: Future> + Send, + E: Future> + Send, + { + Self { + builder: self.builder.install_exex(exex_id, exex), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Launches the node and returns a handle to it. 
+ pub async fn launch( + self, + ) -> eyre::Result, CB::Components>>> { + let Self { builder, task_executor, data_dir } = self; + + let launcher = DefaultNodeLauncher { task_executor, data_dir }; + builder.launch_with(launcher).await + } + + /// Check that the builder can be launched + /// + /// This is useful when writing tests to ensure that the builder is configured correctly. + pub fn check_launch(self) -> Self { + self + } +} + +/// Captures the necessary context for building the components of the node. +pub struct BuilderContext { + /// The current head of the blockchain at launch. + pub(crate) head: Head, + /// The configured provider to interact with the blockchain. + pub(crate) provider: Node::Provider, + /// The executor of the node. + pub(crate) executor: TaskExecutor, + /// The data dir of the node. + pub(crate) data_dir: ChainPath, + /// The config of the node + pub(crate) config: NodeConfig, + /// loaded config + pub(crate) reth_config: reth_config::Config, + /// EVM config of the node + pub(crate) evm_config: Node::Evm, +} + +impl BuilderContext { + /// Create a new instance of [BuilderContext] + pub fn new( + head: Head, + provider: Node::Provider, + executor: TaskExecutor, + data_dir: ChainPath, + config: NodeConfig, + reth_config: reth_config::Config, + evm_config: Node::Evm, + ) -> Self { + Self { head, provider, executor, data_dir, config, reth_config, evm_config } + } + + /// Returns the configured provider to interact with the blockchain. + pub fn provider(&self) -> &Node::Provider { + &self.provider + } + + /// Returns the configured evm. + pub fn evm_config(&self) -> &Node::Evm { + &self.evm_config + } + + /// Returns the current head of the blockchain at launch. + pub fn head(&self) -> Head { + self.head + } + + /// Returns the config of the node. + pub fn config(&self) -> &NodeConfig { + &self.config + } + + /// Returns the data dir of the node. + /// + /// This gives access to all relevant files and directories of the node's datadir. 
+ pub fn data_dir(&self) -> &ChainPath { + &self.data_dir + } + + /// Returns the executor of the node. + /// + /// This can be used to execute async tasks or functions during the setup. + pub fn task_executor(&self) -> &TaskExecutor { + &self.executor + } + + /// Returns the chain spec of the node. + pub fn chain_spec(&self) -> Arc { + self.provider().chain_spec() + } + + /// Returns the transaction pool config of the node. + pub fn pool_config(&self) -> PoolConfig { + self.config().txpool.pool_config() + } + + /// Loads `MAINNET_KZG_TRUSTED_SETUP`. + pub fn kzg_settings(&self) -> eyre::Result> { + Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + } + + /// Returns the config for payload building. + pub fn payload_builder_config(&self) -> impl PayloadBuilderConfig { + self.config.builder.clone() + } + + /// Returns the default network config for the node. + pub fn network_config(&self) -> eyre::Result> { + self.config.network_config( + &self.reth_config, + self.provider.clone(), + self.executor.clone(), + self.head, + self.data_dir(), + ) + } + + /// Creates the [NetworkBuilder] for the node. + pub async fn network_builder(&self) -> eyre::Result> { + self.config + .build_network( + &self.reth_config, + self.provider.clone(), + self.executor.clone(), + self.head, + self.data_dir(), + ) + .await + } + + /// Convenience function to start the network. + /// + /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected + /// to that network. 
+ pub fn start_network( + &self, + builder: NetworkBuilder, + pool: Pool, + ) -> NetworkHandle + where + Pool: TransactionPool + Unpin + 'static, + { + let (handle, network, txpool, eth) = builder + .transactions(pool, Default::default()) + .request_handler(self.provider().clone()) + .split_with_handle(); + + self.executor.spawn_critical("p2p txpool", txpool); + self.executor.spawn_critical("p2p eth request handler", eth); + + let default_peers_path = self.data_dir().known_peers_path(); + let known_peers_file = self.config.network.persistent_peers_file(default_peers_path); + self.executor.spawn_critical_with_graceful_shutdown_signal( + "p2p network task", + |shutdown| { + network.run_until_graceful_shutdown(shutdown, |network| { + write_peers_to_file(network, known_peers_file) + }) + }, + ); + + handle + } +} + +impl std::fmt::Debug for BuilderContext { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("BuilderContext") + .field("head", &self.head) + .field("provider", &std::any::type_name::()) + .field("executor", &self.executor) + .field("data_dir", &self.data_dir) + .field("config", &self.config) + .finish() + } +} diff --git a/crates/node-builder/src/builder/states.rs b/crates/node-builder/src/builder/states.rs new file mode 100644 index 000000000..975590c5f --- /dev/null +++ b/crates/node-builder/src/builder/states.rs @@ -0,0 +1,237 @@ +//! Node builder states and helper traits. +//! +//! Keeps track of the current state of the node builder. +//! +//! The node builder process is essentially a state machine that transitions through various states +//! before the node can be launched. 
+ +use crate::{ + components::{NodeComponents, NodeComponentsBuilder}, + exex::BoxedLaunchExEx, + hooks::NodeHooks, + launch::LaunchNode, + rpc::{RethRpcServerHandles, RpcContext, RpcHooks}, + FullNode, +}; +use reth_exex::ExExContext; +use reth_network::NetworkHandle; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; +use reth_node_core::node_config::NodeConfig; +use reth_payload_builder::PayloadBuilderHandle; +use reth_tasks::TaskExecutor; +use std::{fmt, future::Future}; + +/// A node builder that also has the configured types. +pub struct NodeBuilderWithTypes { + /// All settings for how the node should be configured. + config: NodeConfig, + /// The configured database for the node. + adapter: NodeTypesAdapter, +} + +impl NodeBuilderWithTypes { + /// Creates a new instance of the node builder with the given configuration and types. + pub fn new(config: NodeConfig, types: T, database: T::DB) -> Self { + Self { config, adapter: NodeTypesAdapter::new(types, database) } + } + + /// Advances the state of the node builder to the next state where all components are configured + pub fn with_components(self, components_builder: CB) -> NodeBuilderWithComponents + where + CB: NodeComponentsBuilder, + { + let Self { config, adapter } = self; + + NodeBuilderWithComponents { + config, + adapter, + components_builder, + add_ons: NodeAddOns { + hooks: NodeHooks::default(), + rpc: RpcHooks::new(), + exexs: Vec::new(), + }, + } + } +} + +/// Container for the node's types and the database the node uses. +pub(crate) struct NodeTypesAdapter { + /// The database type used by the node. + pub(crate) database: T::DB, + // TODO(mattsse): make this stateless + pub(crate) types: T, +} + +impl NodeTypesAdapter { + /// Create a new adapter from the given node types. 
+ pub(crate) fn new(types: T, database: T::DB) -> Self { + Self { types, database } + } +} + +impl fmt::Debug for NodeTypesAdapter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NodeTypesAdapter").field("db", &"...").field("types", &"...").finish() + } +} + +/// Container for the node's types and the components and other internals that can be used by addons +/// of the node. +pub struct NodeAdapter> { + /// The components of the node. + pub components: C, + /// The task executor for the node. + pub task_executor: TaskExecutor, + /// The provider of the node. + pub provider: T::Provider, + /// EVM config + pub evm: T::Evm, +} + +impl> NodeTypes for NodeAdapter { + type Primitives = T::Primitives; + type Engine = T::Engine; + type Evm = T::Evm; + + fn evm_config(&self) -> Self::Evm { + self.evm.clone() + } +} + +impl> FullNodeTypes for NodeAdapter { + type DB = T::DB; + type Provider = T::Provider; +} + +impl> FullNodeComponents for NodeAdapter { + type Pool = C::Pool; + + fn pool(&self) -> &Self::Pool { + self.components.pool() + } + + fn provider(&self) -> &Self::Provider { + &self.provider + } + + fn network(&self) -> &NetworkHandle { + self.components.network() + } + + fn payload_builder(&self) -> &PayloadBuilderHandle { + self.components.payload_builder() + } + + fn task_executor(&self) -> &TaskExecutor { + &self.task_executor + } +} + +impl> Clone for NodeAdapter { + fn clone(&self) -> Self { + Self { + components: self.components.clone(), + task_executor: self.task_executor.clone(), + provider: self.provider.clone(), + evm: self.evm.clone(), + } + } +} + +/// A fully type configured node builder. +/// +/// Supports adding additional addons to the node. +pub struct NodeBuilderWithComponents> { + /// All settings for how the node should be configured. 
+ pub(crate) config: NodeConfig, + /// Adapter for the underlying node types and database + pub(crate) adapter: NodeTypesAdapter, + /// container for type specific components + pub(crate) components_builder: CB, + /// Additional node extensions. + pub(crate) add_ons: NodeAddOns>, +} + +impl> NodeBuilderWithComponents { + /// Sets the hook that is run once the node's components are initialized. + pub fn on_component_initialized(mut self, hook: F) -> Self + where + F: Fn(NodeAdapter) -> eyre::Result<()> + Send + 'static, + { + self.add_ons.hooks.set_on_component_initialized(hook); + self + } + + /// Sets the hook that is run once the node has started. + pub fn on_node_started(mut self, hook: F) -> Self + where + F: Fn(FullNode>) -> eyre::Result<()> + Send + 'static, + { + self.add_ons.hooks.set_on_node_started(hook); + self + } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(mut self, hook: F) -> Self + where + F: Fn( + RpcContext<'_, NodeAdapter>, + RethRpcServerHandles, + ) -> eyre::Result<()> + + Send + + 'static, + { + self.add_ons.rpc.set_on_rpc_started(hook); + self + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(mut self, hook: F) -> Self + where + F: Fn(RpcContext<'_, NodeAdapter>) -> eyre::Result<()> + Send + 'static, + { + self.add_ons.rpc.set_extend_rpc_modules(hook); + self + } + + /// Installs an ExEx (Execution Extension) in the node. + /// + /// # Note + /// + /// The ExEx ID must be unique. + pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self + where + F: Fn(ExExContext>) -> R + Send + 'static, + R: Future> + Send, + E: Future> + Send, + { + self.add_ons.exexs.push((exex_id.into(), Box::new(exex))); + self + } + + /// Launches the node with the given launcher. 
+ pub async fn launch_with(self, launcher: L) -> eyre::Result + where + L: LaunchNode, + { + launcher.launch_node(self).await + } + + /// Check that the builder can be launched + /// + /// This is useful when writing tests to ensure that the builder is configured correctly. + pub fn check_launch(self) -> Self { + self + } +} + +/// Additional node extensions. +pub(crate) struct NodeAddOns { + /// Additional NodeHooks that are called at specific points in the node's launch lifecycle. + pub(crate) hooks: NodeHooks, + /// Additional RPC hooks. + pub(crate) rpc: RpcHooks, + /// The ExExs (execution extensions) of the node. + pub(crate) exexs: Vec<(String, Box>)>, +} diff --git a/crates/node-builder/src/components/builder.rs b/crates/node-builder/src/components/builder.rs index 14bdf7a4a..1c963f024 100644 --- a/crates/node-builder/src/components/builder.rs +++ b/crates/node-builder/src/components/builder.rs @@ -1,11 +1,11 @@ //! A generic [NodeComponentsBuilder] use crate::{ - components::{NetworkBuilder, NodeComponents, PayloadServiceBuilder, PoolBuilder}, + components::{Components, NetworkBuilder, NodeComponents, PayloadServiceBuilder, PoolBuilder}, BuilderContext, FullNodeTypes, }; use reth_transaction_pool::TransactionPool; -use std::marker::PhantomData; +use std::{future::Future, marker::PhantomData}; /// A generic, customizable [`NodeComponentsBuilder`]. 
/// @@ -135,19 +135,19 @@ where NetworkB: NetworkBuilder, PayloadB: PayloadServiceBuilder, { - type Pool = PoolB::Pool; + type Components = Components; async fn build_components( self, context: &BuilderContext, - ) -> eyre::Result> { + ) -> eyre::Result { let Self { pool_builder, payload_builder, network_builder, _marker } = self; let pool = pool_builder.build_pool(context).await?; let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; - Ok(NodeComponents { transaction_pool: pool, network, payload_builder }) + Ok(Components { transaction_pool: pool, network, payload_builder }) } } @@ -170,31 +170,31 @@ impl Default for ComponentsBuilder<(), (), (), ()> { /// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize /// certain components of the node using the builder pattern and defaults, e.g. Ethereum and /// Optimism. -pub trait NodeComponentsBuilder { - /// The transaction pool to use. - type Pool: TransactionPool + Unpin + 'static; +/// A type that's responsible for building the components of the node. +pub trait NodeComponentsBuilder: Send { + /// The components for the node with the given types + type Components: NodeComponents; - /// Builds the components of the node. + /// Consumes the type and returns the crated components. 
fn build_components( self, - context: &BuilderContext, - ) -> impl std::future::Future>> + Send; + ctx: &BuilderContext, + ) -> impl Future> + Send; } impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: std::future::Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, { - type Pool = Pool; + type Components = Components; fn build_components( self, ctx: &BuilderContext, - ) -> impl std::future::Future>> + Send - { + ) -> impl Future> + Send { self(ctx) } } diff --git a/crates/node-builder/src/components/mod.rs b/crates/node-builder/src/components/mod.rs index 4aa73f0ff..ea087ece2 100644 --- a/crates/node-builder/src/components/mod.rs +++ b/crates/node-builder/src/components/mod.rs @@ -14,17 +14,36 @@ pub use payload::*; pub use pool::*; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; +use reth_transaction_pool::TransactionPool; mod builder; mod network; mod payload; mod pool; +/// An abstraction over the components of a node, consisting of: +/// - transaction pool +/// - network +/// - payload builder. +pub trait NodeComponents: Clone + Send + Sync + 'static { + /// The transaction pool of the node. + type Pool: TransactionPool + Unpin; + + /// Returns the transaction pool of the node. + fn pool(&self) -> &Self::Pool; + + /// Returns the handle to the network + fn network(&self) -> &NetworkHandle; + + /// Returns the handle to the payload builder service. + fn payload_builder(&self) -> &PayloadBuilderHandle; +} + /// All the components of the node. /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct NodeComponents { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, /// The network implementation of the node. 
@@ -33,9 +52,36 @@ pub struct NodeComponents { pub payload_builder: PayloadBuilderHandle, } -impl NodeComponents { - /// Returns the handle to the payload builder service. - pub fn payload_builder(&self) -> PayloadBuilderHandle { - self.payload_builder.clone() +impl NodeComponents for Components +where + Node: FullNodeTypes, + Pool: TransactionPool + Unpin + 'static, +{ + type Pool = Pool; + + fn pool(&self) -> &Self::Pool { + &self.transaction_pool + } + + fn network(&self) -> &NetworkHandle { + &self.network + } + + fn payload_builder(&self) -> &PayloadBuilderHandle { + &self.payload_builder + } +} + +impl Clone for Components +where + Node: FullNodeTypes, + Pool: TransactionPool, +{ + fn clone(&self) -> Self { + Self { + transaction_pool: self.transaction_pool.clone(), + network: self.network.clone(), + payload_builder: self.payload_builder.clone(), + } } } diff --git a/crates/node-builder/src/launch.rs b/crates/node-builder/src/launch.rs new file mode 100644 index 000000000..645598ada --- /dev/null +++ b/crates/node-builder/src/launch.rs @@ -0,0 +1,558 @@ +//! Abstraction for launching a node. 
+ +use crate::{ + builder::{NodeAdapter, NodeAddOns, NodeTypesAdapter}, + components::{NodeComponents, NodeComponentsBuilder}, + hooks::NodeHooks, + node::FullNode, + BuilderContext, NodeBuilderWithComponents, NodeHandle, RethFullAdapter, +}; +use eyre::Context; +use futures::{future, future::Either, stream, stream_select, StreamExt}; +use rayon::ThreadPoolBuilder; +use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; +use reth_beacon_consensus::{ + hooks::{EngineHooks, PruneHook, StaticFileHook}, + BeaconConsensus, BeaconConsensusEngine, +}; +use reth_blockchain_tree::{ + BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, +}; +use reth_config::config::EtlConfig; +use reth_consensus::Consensus; +use reth_db::{ + database::Database, + database_metrics::{DatabaseMetadata, DatabaseMetrics}, +}; +use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; +use reth_interfaces::p2p::either::EitherDownloader; +use reth_network::NetworkEvents; +use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_core::{ + cli::config::RethRpcConfig, + dirs::{ChainPath, DataDirPath}, + engine_api_store::EngineApiStore, + engine_skip_fcu::EngineApiSkipFcu, + exit::NodeExitFuture, + init::init_genesis, + node_config::NodeConfig, +}; +use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; +use reth_primitives::format_ether; +use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory}; +use reth_prune::PrunerBuilder; +use reth_revm::EvmProcessorFactory; +use reth_rpc_engine_api::EngineApi; +use reth_static_file::StaticFileProducer; +use reth_tasks::TaskExecutor; +use reth_tracing::tracing::{debug, error, info}; +use reth_transaction_pool::TransactionPool; +use std::{cmp::max, future::Future, sync::Arc, thread::available_parallelism}; +use tokio::sync::{mpsc::unbounded_channel, oneshot}; + +/// Launches a new node. +/// +/// Acts as a node factory. 
+/// +/// This is essentially the launch logic for a node. +pub trait LaunchNode { + /// The node type that is created. + type Node; + + /// Create and return a new node asynchronously. + fn launch_node(self, target: Target) -> impl Future> + Send; +} + +/// The default launcher for a node. +#[derive(Debug)] +pub struct DefaultNodeLauncher { + /// The task executor for the node. + pub task_executor: TaskExecutor, + /// The data directory for the node. + pub data_dir: ChainPath, +} + +impl DefaultNodeLauncher { + /// Create a new instance of the default node launcher. + pub fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { + Self { task_executor, data_dir } + } + + /// Loads the reth config with the given datadir root + fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { + let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path()); + + let mut toml_config = confy::load_path::(&config_path) + .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; + + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + + // Update the config with the command line arguments + toml_config.peers.trusted_nodes_only = config.network.trusted_only; + + if !config.network.trusted_peers.is_empty() { + info!(target: "reth::cli", "Adding trusted nodes"); + config.network.trusted_peers.iter().for_each(|peer| { + toml_config.peers.trusted_nodes.insert(*peer); + }); + } + + Ok(toml_config) + } +} + +impl LaunchNode, CB>> + for DefaultNodeLauncher +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, + CB: NodeComponentsBuilder>, +{ + type Node = NodeHandle, CB::Components>>; + + async fn launch_node( + self, + target: NodeBuilderWithComponents, CB>, + ) -> eyre::Result { + let NodeBuilderWithComponents { + adapter: NodeTypesAdapter { types, database }, + components_builder, + add_ons: NodeAddOns { hooks, rpc, exexs: installed_exex }, + config, + } = 
target; + + // get config from file + let reth_config = self.load_toml_config(&config)?; + + let Self { task_executor, data_dir } = self; + + // Raise the fd limit of the process. + // Does not do anything on windows. + fdlimit::raise_fd_limit()?; + + // Limit the global rayon thread pool, reserving 2 cores for the rest of the system + let _ = ThreadPoolBuilder::new() + .num_threads( + available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), + ) + .build_global() + .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); + + let provider_factory = ProviderFactory::new( + database.clone(), + Arc::clone(&config.chain), + data_dir.static_files_path(), + )? + .with_static_files_metrics(); + info!(target: "reth::cli", "Database opened"); + + let prometheus_handle = config.install_prometheus_recorder()?; + config + .start_metrics_endpoint( + prometheus_handle, + database.clone(), + provider_factory.static_file_provider(), + task_executor.clone(), + ) + .await?; + + debug!(target: "reth::cli", chain=%config.chain.chain, +genesis=?config.chain.genesis_hash(), "Initializing genesis"); + + let genesis_hash = init_genesis(provider_factory.clone())?; + + info!(target: "reth::cli", "\n{}", config.chain.display_hardforks()); + + // setup the consensus instance + let consensus: Arc = if config.dev.dev { + Arc::new(AutoSealConsensus::new(Arc::clone(&config.chain))) + } else { + Arc::new(BeaconConsensus::new(Arc::clone(&config.chain))) + }; + + debug!(target: "reth::cli", "Spawning stages metrics listener task"); + let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); + let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); + task_executor.spawn_critical("stages metrics listener task", sync_metrics_listener); + + let prune_config = config.prune_config()?.or_else(|| reth_config.prune.clone()); + + // Configure the blockchain tree for the node + let evm_config = types.evm_config(); + let tree_config = 
BlockchainTreeConfig::default(); + let tree_externals = TreeExternals::new( + provider_factory.clone(), + consensus.clone(), + EvmProcessorFactory::new(config.chain.clone(), evm_config.clone()), + ); + let tree = BlockchainTree::new( + tree_externals, + tree_config, + prune_config.as_ref().map(|config| config.segments.clone()), + )? + .with_sync_metrics_tx(sync_metrics_tx.clone()); + + let canon_state_notification_sender = tree.canon_state_notification_sender(); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + debug!(target: "reth::cli", "configured blockchain tree"); + + // fetch the head block from the database + let head = + config.lookup_head(provider_factory.clone()).wrap_err("the head block is missing")?; + + // setup the blockchain provider + let blockchain_db = + BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; + + let ctx = BuilderContext::new( + head, + blockchain_db, + task_executor, + data_dir, + config, + reth_config, + evm_config.clone(), + ); + + debug!(target: "reth::cli", "creating components"); + let components = components_builder.build_components(&ctx).await?; + + let BuilderContext { + provider: blockchain_db, + executor, + data_dir, + mut config, + mut reth_config, + .. + } = ctx; + + let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; + + let node_adapter = NodeAdapter { + components, + task_executor: executor.clone(), + provider: blockchain_db.clone(), + evm: evm_config.clone(), + }; + + debug!(target: "reth::cli", "calling on_component_initialized hook"); + on_component_initialized.on_event(node_adapter.clone())?; + + // spawn exexs + let mut exex_handles = Vec::with_capacity(installed_exex.len()); + let mut exexs = Vec::with_capacity(installed_exex.len()); + for (id, exex) in installed_exex { + // create a new exex handle + let (handle, events, notifications) = ExExHandle::new(id.clone()); + exex_handles.push(handle); + + // create the launch context for the exex + let context = ExExContext { + head, + provider: blockchain_db.clone(), + task_executor: executor.clone(), + data_dir: data_dir.clone(), + config: config.clone(), + reth_config: reth_config.clone(), + pool: node_adapter.components.pool().clone(), + events, + notifications, + }; + + let executor = executor.clone(); + exexs.push(async move { + debug!(target: "reth::cli", id, "spawning exex"); + let span = reth_tracing::tracing::info_span!("exex", id); + let _enter = span.enter(); + + // init the exex + let exex = exex.launch(context).await.unwrap(); + + // spawn it as a crit task + executor.spawn_critical("exex", async move { + info!(target: "reth::cli", "ExEx started"); + match exex.await { + Ok(_) => panic!("ExEx {id} finished. 
ExEx's should run indefinitely"), + Err(err) => panic!("ExEx {id} crashed: {err}"), + } + }); + }); + } + + future::join_all(exexs).await; + + // spawn exex manager + let exex_manager_handle = if !exex_handles.is_empty() { + debug!(target: "reth::cli", "spawning exex manager"); + // todo(onbjerg): rm magic number + let exex_manager = ExExManager::new(exex_handles, 1024); + let exex_manager_handle = exex_manager.handle(); + executor.spawn_critical("exex manager", async move { + exex_manager.await.expect("exex manager crashed"); + }); + + // send notifications from the blockchain tree to exex manager + let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); + let mut handle = exex_manager_handle.clone(); + executor.spawn_critical("exex manager blockchain tree notifications", async move { + while let Ok(notification) = canon_state_notifications.recv().await { + handle.send_async(notification.into()).await.expect( + "blockchain tree notification could not be sent to exex +manager", + ); + } + }); + + info!(target: "reth::cli", "ExEx Manager started"); + + Some(exex_manager_handle) + } else { + None + }; + + // create pipeline + let network_client = node_adapter.network().fetch_client().await?; + let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); + + if let Some(skip_fcu_threshold) = config.debug.skip_fcu { + debug!(target: "reth::cli", "spawning skip FCU task"); + let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); + let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); + executor.spawn_critical( + "skip FCU interceptor", + engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), + ); + consensus_engine_rx = skip_fcu_rx; + } + + if let Some(store_path) = config.debug.engine_api_store.clone() { + debug!(target: "reth::cli", "spawning engine API store"); + let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); + let engine_api_store = EngineApiStore::new(store_path); + 
executor.spawn_critical( + "engine api interceptor", + engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), + ); + consensus_engine_rx = engine_intercept_rx; + }; + + let max_block = config.max_block(network_client.clone(), provider_factory.clone()).await?; + let mut hooks = EngineHooks::new(); + + let static_file_producer = StaticFileProducer::new( + provider_factory.clone(), + provider_factory.static_file_provider(), + prune_config.clone().unwrap_or_default().segments, + ); + let static_file_producer_events = static_file_producer.lock().events(); + hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone()))); + info!(target: "reth::cli", "StaticFileProducer initialized"); + + // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + if reth_config.stages.etl.dir.is_none() { + reth_config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + } + + // Configure the pipeline + let pipeline_exex_handle = + exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); + let (mut pipeline, client) = if config.dev.dev { + info!(target: "reth::cli", "Starting Reth in dev mode"); + + for (idx, (address, alloc)) in config.chain.genesis.alloc.iter().enumerate() { + info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, +address.to_string(), format_ether(alloc.balance)); + } + + // install auto-seal + let pending_transactions_listener = + node_adapter.components.pool().pending_transactions_listener(); + + let mining_mode = if let Some(interval) = config.dev.block_time { + MiningMode::interval(interval) + } else if let Some(max_transactions) = config.dev.block_max_transactions { + MiningMode::instant(max_transactions, pending_transactions_listener) + } else { + info!(target: "reth::cli", "No mining mode specified, defaulting to +ReadyTransaction"); + MiningMode::instant(1, pending_transactions_listener) + }; + + let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( + Arc::clone(&config.chain), + blockchain_db.clone(), + node_adapter.components.pool().clone(), + consensus_engine_tx.clone(), + canon_state_notification_sender, + mining_mode, + evm_config.clone(), + ) + .build(); + + let mut pipeline = crate::setup::build_networked_pipeline( + &config, + &reth_config.stages, + client.clone(), + Arc::clone(&consensus), + provider_factory.clone(), + &executor, + sync_metrics_tx, + prune_config.clone(), + max_block, + static_file_producer, + evm_config, + pipeline_exex_handle, + ) + .await?; + + let pipeline_events = pipeline.events(); + task.set_pipeline_events(pipeline_events); + debug!(target: "reth::cli", "Spawning auto mine task"); + executor.spawn(Box::pin(task)); + + (pipeline, EitherDownloader::Left(client)) + } else { + let pipeline = crate::setup::build_networked_pipeline( + &config, + &reth_config.stages, + network_client.clone(), + Arc::clone(&consensus), + provider_factory.clone(), + &executor, + sync_metrics_tx, + prune_config.clone(), + max_block, + static_file_producer, + evm_config, + pipeline_exex_handle, + ) + .await?; + + (pipeline, EitherDownloader::Right(network_client.clone())) + }; + + let pipeline_events = pipeline.events(); + + let initial_target = config.initial_pipeline_target(genesis_hash); + + let prune_config = 
prune_config.unwrap_or_default(); + let mut pruner_builder = PrunerBuilder::new(prune_config.clone()) + .max_reorg_depth(tree_config.max_reorg_depth() as usize) + .prune_delete_limit(config.chain.prune_delete_limit) + .timeout(PrunerBuilder::DEFAULT_TIMEOUT); + if let Some(exex_manager_handle) = &exex_manager_handle { + pruner_builder = + pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); + } + + let mut pruner = pruner_builder.build(provider_factory.clone()); + + let pruner_events = pruner.events(); + hooks.add(PruneHook::new(pruner, Box::new(executor.clone()))); + info!(target: "reth::cli", ?prune_config, "Pruner initialized"); + + // Configure the consensus engine + let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( + client, + pipeline, + blockchain_db.clone(), + Box::new(executor.clone()), + Box::new(node_adapter.components.network().clone()), + max_block, + config.debug.continuous, + node_adapter.components.payload_builder().clone(), + initial_target, + reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, + consensus_engine_tx, + consensus_engine_rx, + hooks, + )?; + info!(target: "reth::cli", "Consensus engine initialized"); + + let events = stream_select!( + node_adapter.components.network().event_listener().map(Into::into), + beacon_engine_handle.event_listener().map(Into::into), + pipeline_events.map(Into::into), + if config.debug.tip.is_none() && !config.dev.dev { + Either::Left( + ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) + .map(Into::into), + ) + } else { + Either::Right(stream::empty()) + }, + pruner_events.map(Into::into), + static_file_producer_events.map(Into::into) + ); + executor.spawn_critical( + "events task", + node::handle_events( + Some(node_adapter.components.network().clone()), + Some(head.number), + events, + database.clone(), + ), + ); + + let engine_api = EngineApi::new( + blockchain_db.clone(), + config.chain.clone(), + beacon_engine_handle, + 
node_adapter.components.payload_builder().clone().into(), + Box::new(executor.clone()), + ); + info!(target: "reth::cli", "Engine API handler initialized"); + + // extract the jwt secret from the args if possible + let default_jwt_path = data_dir.jwt_path(); + let jwt_secret = config.rpc.auth_jwt_secret(default_jwt_path)?; + + // adjust rpc port numbers based on instance number + config.adjust_instance_ports(); + + // Start RPC servers + let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( + node_adapter.clone(), + engine_api, + &config, + jwt_secret, + rpc, + ) + .await?; + + // in dev mode we generate 20 random dev-signer accounts + if config.dev.dev { + rpc_registry.eth_api().with_dev_accounts(); + } + + // Run consensus engine to completion + let (tx, rx) = oneshot::channel(); + info!(target: "reth::cli", "Starting consensus engine"); + executor.spawn_critical_blocking("consensus engine", async move { + let res = beacon_consensus_engine.await; + let _ = tx.send(res); + }); + + let full_node = FullNode { + evm_config: node_adapter.evm.clone(), + pool: node_adapter.components.pool().clone(), + network: node_adapter.components.network().clone(), + provider: node_adapter.provider.clone(), + payload_builder: node_adapter.components.payload_builder().clone(), + task_executor: executor, + rpc_server_handles, + rpc_registry, + config, + data_dir, + }; + // Notify on node started + on_node_started.on_event(full_node.clone())?; + + let handle = NodeHandle { + node_exit_future: NodeExitFuture::new(rx, full_node.config.debug.terminate), + node: full_node, + }; + + Ok(handle) + } +} diff --git a/crates/node-builder/src/lib.rs b/crates/node-builder/src/lib.rs index f5d7012d1..11b56ba24 100644 --- a/crates/node-builder/src/lib.rs +++ b/crates/node-builder/src/lib.rs @@ -21,6 +21,9 @@ pub mod components; mod builder; pub use builder::*; +mod launch; +pub use launch::*; + mod handle; pub use handle::NodeHandle; diff --git 
a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 2b186b19c..38f6ee4d9 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -270,7 +270,7 @@ impl NodeConfig { /// necessary pub async fn max_block( &self, - network_client: &Client, + network_client: Client, provider: Provider, ) -> eyre::Result> where diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 1304d77d1..3fd158b6a 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -97,87 +97,3 @@ pub trait FullNodeComponents: FullNodeTypes + 'static { /// Returns the task executor. fn task_executor(&self) -> &TaskExecutor; } - -/// A type that encapsulates all the components of the node. -#[derive(Debug)] -pub struct FullNodeComponentsAdapter { - /// The EVM configuration of the node. - pub evm_config: Node::Evm, - /// The transaction pool of the node. - pub pool: Pool, - /// The network handle of the node. - pub network: NetworkHandle, - /// The provider of the node. - pub provider: Node::Provider, - /// The payload builder service handle of the node. - pub payload_builder: PayloadBuilderHandle, - /// The task executor of the node. 
- pub executor: TaskExecutor, -} - -impl FullNodeTypes for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type DB = Node::DB; - type Provider = Node::Provider; -} - -impl NodeTypes for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type Primitives = Node::Primitives; - type Engine = Node::Engine; - type Evm = Node::Evm; - - fn evm_config(&self) -> Self::Evm { - self.evm_config.clone() - } -} - -impl FullNodeComponents for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type Pool = Pool; - - fn pool(&self) -> &Self::Pool { - &self.pool - } - - fn provider(&self) -> &Self::Provider { - &self.provider - } - - fn network(&self) -> &NetworkHandle { - &self.network - } - - fn payload_builder(&self) -> &PayloadBuilderHandle { - &self.payload_builder - } - - fn task_executor(&self) -> &TaskExecutor { - &self.executor - } -} - -impl Clone for FullNodeComponentsAdapter -where - Pool: Clone, -{ - fn clone(&self) -> Self { - Self { - evm_config: self.evm_config.clone(), - pool: self.pool.clone(), - network: self.network.clone(), - provider: self.provider.clone(), - payload_builder: self.payload_builder.clone(), - executor: self.executor.clone(), - } - } -} From 76a3d8278ac96fc55f918fdb8d8fc9561b75c211 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:23:45 -0400 Subject: [PATCH 034/250] feat: add thorough error message to state root error (#7607) --- crates/consensus/consensus/src/lib.rs | 7 +++++ .../interfaces/src/blockchain_tree/error.rs | 30 +++++++++++++++++++ crates/interfaces/src/executor.rs | 5 ++++ crates/stages-api/src/error.rs | 10 +++++++ crates/stages/src/stages/merkle.rs | 30 +++++++++++++++++-- 5 files changed, 79 insertions(+), 3 deletions(-) diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index b434272a4..2dee6b124 
100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -259,6 +259,13 @@ pub enum ConsensusError { HeaderValidationError(#[from] HeaderValidationError), } +impl ConsensusError { + /// Returns `true` if the error is a state root error. + pub fn is_state_root_error(&self) -> bool { + matches!(self, ConsensusError::BodyStateRootDiff(_)) + } +} + /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. #[derive(thiserror::Error, Debug)] #[error("Consensus error: {0}, Invalid header: {1:?}")] diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index 34e018835..44f1f50bc 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -243,6 +243,36 @@ impl InsertBlockErrorKind { matches!(self, InsertBlockErrorKind::Consensus(_)) } + /// Returns true if this error is a state root error + pub fn is_state_root_error(&self) -> bool { + // we need to get the state root errors inside of the different variant branches + match self { + InsertBlockErrorKind::Execution(err) => { + matches!( + err, + BlockExecutionError::Validation(BlockValidationError::StateRoot { .. }) + ) + } + InsertBlockErrorKind::Canonical(err) => { + matches!( + err, + CanonicalError::Validation(BlockValidationError::StateRoot { .. }) | + CanonicalError::Provider( + ProviderError::StateRootMismatch(_) | + ProviderError::UnwindStateRootMismatch(_) + ) + ) + } + InsertBlockErrorKind::Provider(err) => { + matches!( + err, + ProviderError::StateRootMismatch(_) | ProviderError::UnwindStateRootMismatch(_) + ) + } + _ => false, + } + } + /// Returns true if the error is caused by an invalid block /// /// This is intended to be used to determine if the block should be marked as invalid. 
diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index 25e2f5710..e8f7f40b1 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -153,4 +153,9 @@ impl BlockExecutionError { pub fn is_fatal(&self) -> bool { matches!(self, Self::CanonicalCommit { .. } | Self::CanonicalRevert { .. }) } + + /// Returns `true` if the error is a state root error. + pub fn is_state_root_error(&self) -> bool { + matches!(self, Self::Validation(BlockValidationError::StateRoot(_))) + } } diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index 3d7ae1d72..37fe2b3fd 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -20,6 +20,16 @@ pub enum BlockErrorKind { Execution(#[from] executor::BlockExecutionError), } +impl BlockErrorKind { + /// Returns `true` if the error is a state root error. + pub fn is_state_root_error(&self) -> bool { + match self { + BlockErrorKind::Validation(err) => err.is_state_root_error(), + BlockErrorKind::Execution(err) => err.is_state_root_error(), + } + } +} + /// A stage execution error. #[derive(Error, Debug)] pub enum StageError { diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index bfdb9782b..562cff183 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -21,6 +21,24 @@ use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress}; use std::fmt::Debug; use tracing::*; +// TODO: automate the process outlined below so the user can just send in a debugging package +/// The error message that we include in invalid state root errors to tell users what information +/// they should include in a bug report, since true state root errors can be impossible to debug +/// with just basic logs. +pub const INVALID_STATE_ROOT_ERROR_MESSAGE: &str = r#" +Invalid state root error on new payload! 
+This is an error that likely requires a report to the reth team with additional information. +Please include the following information in your report: + * This error message + * The state root of the block that was rejected + * The output of `reth db stats --checksum` from the database that was being used. This will take a long time to run! + * 50-100 lines of logs before and after the first occurrence of this log message. Please search your log output for the first observed occurrence of MAGIC_STATE_ROOT. + * The debug logs from __the same time period__. To find the default location for these logs, run: + `reth --help | grep -A 4 'log.file.directory'` + +Once you have this information, please submit a github issue at https://github.com/paradigmxyz/reth/issues/new +"#; + /// The default threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. pub const MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD: u64 = 5_000; @@ -196,7 +214,10 @@ impl Stage for MerkleStage { let progress = StateRoot::from_tx(tx) .with_intermediate_state(checkpoint.map(IntermediateStateRootState::from)) .root_with_progress() - .map_err(|e| StageError::Fatal(Box::new(e)))?; + .map_err(|e| { + error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "State root with progress failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); + StageError::Fatal(Box::new(e)) + })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { updates.flush(tx)?; @@ -230,7 +251,10 @@ impl Stage for MerkleStage { debug!(target: "sync::stages::merkle::exec", current = ?current_block_number, target = ?to_block, "Updating trie"); let (root, updates) = StateRoot::incremental_root_with_updates(provider.tx_ref(), range) - .map_err(|e| StageError::Fatal(Box::new(e)))?; + .map_err(|e| { + error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! 
{INVALID_STATE_ROOT_ERROR_MESSAGE}"); + StageError::Fatal(Box::new(e)) + })?; updates.flush(provider.tx_ref())?; let total_hashed_entries = (provider.count_entries::()? + @@ -325,7 +349,7 @@ fn validate_state_root( if got == expected.state_root { Ok(()) } else { - warn!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root"); + error!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); Err(StageError::Block { error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( GotExpected { got, expected: expected.state_root }.into(), From a22cf2189f51db26f6566be21cebc0336312b1f1 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:24:10 -0400 Subject: [PATCH 035/250] fix: still delete headers from db in headers unwind (#7579) --- crates/stages/src/stages/headers.rs | 138 ++++++++++++++++++++++++---- 1 file changed, 122 insertions(+), 16 deletions(-) diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index 83dd710d0..fd1484128 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -321,28 +321,48 @@ where ) -> Result { self.sync_gap.take(); + // First unwind the db tables, until the unwind_to block number. 
use the walker to unwind + // HeaderNumbers based on the index in CanonicalHeaders + provider.unwind_table_by_walker::( + input.unwind_to, + )?; + provider.unwind_table_by_num::(input.unwind_to)?; + provider.unwind_table_by_num::(input.unwind_to)?; + let unfinalized_headers_unwound = + provider.unwind_table_by_num::(input.unwind_to)?; + + // determine how many headers to unwind from the static files based on the highest block and + // the unwind_to block let static_file_provider = provider.static_file_provider(); let highest_block = static_file_provider .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - let unwound_headers = highest_block - input.unwind_to; - - for block in (input.unwind_to + 1)..=highest_block { - let header_hash = static_file_provider - .block_hash(block)? - .ok_or(ProviderError::HeaderNotFound(block.into()))?; - - provider.tx_ref().delete::(header_hash, None)?; + let static_file_headers_to_unwind = highest_block - input.unwind_to; + for block_number in (input.unwind_to + 1)..=highest_block { + let hash = static_file_provider.block_hash(block_number)?; + // we have to delete from HeaderNumbers here as well as in the above unwind, since that + // mapping contains entries for both headers in the db and headers in static files + // + // so if we are unwinding past the lowest block in the db, we have to iterate through + // the HeaderNumbers entries that we'll delete in static files below + if let Some(header_hash) = hash { + provider.tx_ref().delete::(header_hash, None)?; + } } + // Now unwind the static files until the unwind_to block number let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.prune_headers(unwound_headers)?; + writer.prune_headers(static_file_headers_to_unwind)?; + // Set the stage checkpoin entities processed based on how much we unwound - we add the + // headers unwound from static files and db let stage_checkpoint = 
input.checkpoint.headers_stage_checkpoint().map(|stage_checkpoint| HeadersCheckpoint { block_range: stage_checkpoint.block_range, progress: EntitiesCheckpoint { - processed: stage_checkpoint.progress.processed.saturating_sub(unwound_headers), + processed: stage_checkpoint.progress.processed.saturating_sub( + static_file_headers_to_unwind + unfinalized_headers_unwound as u64, + ), total: stage_checkpoint.progress.total, }, }); @@ -363,9 +383,12 @@ mod tests { stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; use assert_matches::assert_matches; - use reth_interfaces::test_utils::generators::random_header; - use reth_primitives::{stage::StageUnitCheckpoint, B256}; - use reth_provider::ProviderFactory; + use reth_interfaces::test_utils::generators::{self, random_header, random_header_range}; + use reth_primitives::{ + stage::StageUnitCheckpoint, BlockBody, SealedBlock, SealedBlockWithSenders, B256, + }; + use reth_provider::{BlockHashReader, BlockWriter, BundleStateWithReceipts, ProviderFactory}; + use reth_trie::{updates::TrieUpdates, HashedPostState}; use test_runner::HeadersTestRunner; mod test_runner { @@ -376,9 +399,7 @@ mod tests { use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; - use reth_interfaces::test_utils::{ - generators, generators::random_header_range, TestHeaderDownloader, TestHeadersClient, - }; + use reth_interfaces::test_utils::{TestHeaderDownloader, TestHeadersClient}; use reth_provider::BlockNumReader; use tokio::sync::watch; @@ -551,6 +572,91 @@ mod tests { stage_test_suite!(HeadersTestRunner, headers); + /// Execute the stage with linear downloader, unwinds, and ensures that the database tables + /// along with the static files are cleaned up. 
+ #[tokio::test] + async fn execute_with_linear_downloader_unwind() { + let mut runner = HeadersTestRunner::with_linear_downloader(); + let (checkpoint, previous_stage) = (1000, 1200); + let input = ExecInput { + target: Some(previous_stage), + checkpoint: Some(StageCheckpoint::new(checkpoint)), + }; + let headers = runner.seed_execution(input).expect("failed to seed execution"); + let rx = runner.execute(input); + + runner.client.extend(headers.iter().rev().map(|h| h.clone().unseal())).await; + + // skip `after_execution` hook for linear downloader + let tip = headers.last().unwrap(); + runner.send_tip(tip.hash()); + + let result = rx.await.unwrap(); + runner.db().factory.static_file_provider().commit().unwrap(); + assert_matches!(result, Ok(ExecOutput { checkpoint: StageCheckpoint { + block_number, + stage_checkpoint: Some(StageUnitCheckpoint::Headers(HeadersCheckpoint { + block_range: CheckpointBlockRange { + from, + to + }, + progress: EntitiesCheckpoint { + processed, + total, + } + })) + }, done: true }) if block_number == tip.number && + from == checkpoint && to == previous_stage && + // -1 because we don't need to download the local head + processed == checkpoint + headers.len() as u64 - 1 && total == tip.number + ); + assert!(runner.validate_execution(input, result.ok()).is_ok(), "validation failed"); + assert!(runner.stage().hash_collector.is_empty()); + assert!(runner.stage().header_collector.is_empty()); + + // let's insert some blocks using append_blocks_with_state + let sealed_headers = + random_header_range(&mut generators::rng(), tip.number..tip.number + 10, tip.hash()); + + // make them sealed blocks with senders by converting them to empty blocks + let sealed_blocks = sealed_headers + .iter() + .map(|header| { + SealedBlockWithSenders::new( + SealedBlock::new(header.clone(), BlockBody::default()), + vec![], + ) + .unwrap() + }) + .collect(); + + // append the blocks + let provider = runner.db().factory.provider_rw().unwrap(); + provider + 
.append_blocks_with_state( + sealed_blocks, + BundleStateWithReceipts::default(), + HashedPostState::default(), + TrieUpdates::default(), + None, + ) + .unwrap(); + provider.commit().unwrap(); + + // now we can unwind 10 blocks + let unwind_input = UnwindInput { + checkpoint: StageCheckpoint::new(tip.number + 10), + unwind_to: tip.number, + bad_block: None, + }; + + let unwind_output = runner.unwind(unwind_input).await.unwrap(); + assert_eq!(unwind_output.checkpoint.block_number, tip.number); + + // validate the unwind, ensure that the tables are cleaned up + assert!(runner.validate_unwind(unwind_input).is_ok()); + } + /// Execute the stage with linear downloader #[tokio::test] async fn execute_with_linear_downloader() { From 766e77a8113eb8b852c3fc9a028bdce881133c82 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 24 Apr 2024 17:00:25 -0400 Subject: [PATCH 036/250] chore: log failed tip fetch only after 20 tries (#7850) --- crates/node-core/src/node_config.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 38f6ee4d9..608f12cad 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -425,6 +425,7 @@ impl NodeConfig { Client: HeadersClient, { info!(target: "reth::cli", ?tip, "Fetching tip block from the network."); + let mut fetch_failures = 0; loop { match get_single_header(&client, tip).await { Ok(tip_header) => { @@ -432,7 +433,10 @@ impl NodeConfig { return Ok(tip_header); } Err(error) => { - error!(target: "reth::cli", %error, "Failed to fetch the tip. Retrying..."); + fetch_failures += 1; + if fetch_failures % 20 == 0 { + error!(target: "reth::cli", ?fetch_failures, %error, "Failed to fetch the tip. 
Retrying..."); + } } } } From 0e8e57318041afe129d24b150b414abf0ea3e625 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Wed, 24 Apr 2024 23:34:41 +0200 Subject: [PATCH 037/250] chore: rename exex example crate names (#7851) --- Cargo.lock | 72 +++++++++++++++--------------- examples/exex/minimal/Cargo.toml | 2 +- examples/exex/op-bridge/Cargo.toml | 2 +- 3 files changed, 38 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d96e0fe3b..291638c49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2758,6 +2758,42 @@ dependencies = [ "tokio", ] +[[package]] +name = "exex-minimal" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-tracing", + "tokio", +] + +[[package]] +name = "exex-op-bridge" +version = "0.0.0" +dependencies = [ + "alloy-sol-types", + "eyre", + "futures", + "itertools 0.12.1", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-provider", + "reth-tracing", + "rusqlite", + "tokio", +] + [[package]] name = "eyre" version = "0.6.12" @@ -4713,22 +4749,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "minimal" -version = "0.0.0" -dependencies = [ - "eyre", - "futures", - "reth", - "reth-exex", - "reth-node-api", - "reth-node-core", - "reth-node-ethereum", - "reth-primitives", - "reth-tracing", - "tokio", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5072,26 +5092,6 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" -[[package]] -name = "op-bridge" -version = "0.0.0" -dependencies = [ - "alloy-sol-types", - "eyre", - "futures", - "itertools 0.12.1", - "reth", - "reth-exex", - "reth-node-api", - "reth-node-core", - "reth-node-ethereum", - "reth-primitives", - "reth-provider", - "reth-tracing", - "rusqlite", - 
"tokio", -] - [[package]] name = "opaque-debug" version = "0.3.1" diff --git a/examples/exex/minimal/Cargo.toml b/examples/exex/minimal/Cargo.toml index fc6eba841..a7bcc327a 100644 --- a/examples/exex/minimal/Cargo.toml +++ b/examples/exex/minimal/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "minimal" +name = "exex-minimal" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/exex/op-bridge/Cargo.toml b/examples/exex/op-bridge/Cargo.toml index 3d87b2801..d8669e914 100644 --- a/examples/exex/op-bridge/Cargo.toml +++ b/examples/exex/op-bridge/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "op-bridge" +name = "exex-op-bridge" version = "0.0.0" publish = false edition.workspace = true From 4cef3809e499dfc23a242a3b9b617b36c01ec221 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Wed, 24 Apr 2024 23:58:26 +0200 Subject: [PATCH 038/250] docs: update examples readme (#7852) --- examples/README.md | 72 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 11 deletions(-) diff --git a/examples/README.md b/examples/README.md index 847325f93..791851a46 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,17 +1,67 @@ -## Examples of how to use the Reth SDK +# Examples -This directory contains a number of examples showcasing various capabilities of -the `reth-*` crates. +These examples demonstrate the main features of some of Reth's crates and how to use them. -All examples can be executed with: - -``` -cargo run --example $name -``` - -A good starting point for the examples would be [`db-access`](db-access.rs) -and [`rpc-db`](rpc-db). +To run an example, use the command `cargo run -p `. If you've got an example you'd like to see here, please feel free to open an issue. Otherwise if you've got an example you'd like to add, please feel free to make a PR! 
+ +## Node Builder + +| Example | Description | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./additional-rpc-namespace-in-cli) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./cli-extension-event-hooks) | Illustrates how to hook to various node lifecycle events | +| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | +| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | +| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | +| [Custom node](./custom-node) | Illustrates how to create a node with custom engine types | +| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | +| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | + +## ExEx + +| Example | Description | +| ---------------------------------- | --------------------------------------------------------------------------------- | +| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | +| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | + +## RPC + +| Example | Description | +| ----------------------- | --------------------------------------------------------------------------- | +| [DB over RPC](./rpc-db) | Illustrates how to run a standalone RPC server over a Rethdatabase instance | + +## Database + +| Example | Description | +| --------------------------- | --------------------------------------------------------------- | +| [DB access](./db-access.rs) | Illustrates how to access Reth's database in a separate process | + +## 
Network + +| Example | Description | +| ---------------------------------- | ------------------------------------------------------------ | +| [Standalone network](./network.rs) | Illustrates how to use the network as a standalone component | + +## Mempool + +| Example | Description | +| ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | +| [Trace pending transactions](./trace-transaction-cli) | Illustrates how to trace pending transactions as they arrive in the mempool | +| [Standalone txpool](./network-txpool.rs) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | + +## P2P + +| Example | Description | +| --------------------------- | ----------------------------------------------------------------- | +| [Manual P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer | +| [Polygon P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer on Polygon | + +## Misc + +| Example | Description | +| ---------------------------------- | ----------------------------------------------------------- | +| [Beacon API SSE](./beacon-api-sse) | Illustrates how to subscribe to beacon chain events via SSE | From 652be135c795e7a8518078de31f8b51e219f331e Mon Sep 17 00:00:00 2001 From: Mihir Wadekar Date: Thu, 25 Apr 2024 03:00:34 -0700 Subject: [PATCH 039/250] feat: adds panels for execution extension metrics (#7605) --- etc/grafana/dashboards/overview.json | 485 +++++++++++++++++++++++++++ 1 file changed, 485 insertions(+) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index e9b322f1b..40c120e0f 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -7942,6 +7942,491 @@ ], "title": "RPC Throughput", "type": "timeseries" + }, + { + "collapsed": false, + 
"gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 273 + }, + "id": 214, + "panels": [], + "title": "Execution Extensions", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of canonical state notifications sent to an ExEx.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 282 + }, + "id": 215, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_notifications_sent_total{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Total Notifications Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Notifications Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of events an ExEx has sent to the manager.", + "fieldConfig": 
{ + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 282 + }, + "id": 216, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_events_sent_total{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Total Events Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Events Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current and Max capacity of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + 
"tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 290 + }, + "id": 218, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_current_capacity{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Current size", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "max_over_time(reth_exex_manager_max_capacity{instance=~\"$instance\"}[1h])", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "C" + } + ], + "title": "Current and Max Capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current size of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 290 + }, + "id": 219, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_buffer_size{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "B" + } + ], + "title": "Buffer Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of ExExs on the node", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "align": "auto", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 298 + }, + "id": 220, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "8.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + 
"expr": "reth_exex_manager_num_exexs{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Number of ExExs", + "range": true, + "refId": "A" + } + ], + "title": "Number of ExExs", + "type": "stat" } ], "refresh": "30s", From 9f0874d59f7010cd553f0a150ea7370464e82f9e Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 25 Apr 2024 12:53:36 +0200 Subject: [PATCH 040/250] chore: add `reth-stages-api` to `CODEOWNERS` (#7865) --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index 8efa8da85..b7bd14f8e 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -15,6 +15,7 @@ crates/rpc/ @mattsse @Rjected crates/rpc/rpc-types @mattsse @Rjected @Evalir crates/rpc/rpc-types-compat @mattsse @Rjected @Evalir crates/stages/ @onbjerg @rkrasiuk @shekhirin +crates/stages-api/ @onbjerg @rkrasiuk @shekhirin crates/static-file @joshieDo @shekhirin crates/storage/ @rakita @joshieDo @shekhirin crates/tasks @mattsse From d0382fb88d90e8d7cf30eb3a0bc4392e92e62d1f Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 25 Apr 2024 13:01:34 +0200 Subject: [PATCH 041/250] chore: add `trie-parallel` to `CODEOWNERS` (#7866) --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index b7bd14f8e..d3d20e501 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -22,4 +22,5 @@ crates/tasks @mattsse crates/tracing @onbjerg crates/transaction-pool/ @mattsse crates/trie @rkrasiuk +crates/trie-parallel @rkrasiuk .github/ @onbjerg @gakonst @DaniPopes From 90bf4005e0f43d4eb8d64f1cfd4887edef68080a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 25 Apr 2024 13:05:03 +0200 Subject: [PATCH 042/250] chore: merge consensus and revm owners (#7867) --- CODEOWNERS | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index d3d20e501..f6ff05a6e 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,16 +1,14 @@ * @gakonst bin/ @onbjerg crates/blockchain-tree @rakita @rkrasiuk -crates/consensus/auto-seal @mattsse 
-crates/consensus/beacon @rkrasiuk @mattsse @Rjected +crates/consensus @rkrasiuk @mattsse @Rjected crates/exex @onbjerg @shekhirin crates/metrics @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk crates/payload/ @mattsse @Rjected crates/prune @shekhirin @joshieDo -crates/revm/src/ @rakita -crates/revm/ @mattsse +crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected crates/rpc/rpc-types @mattsse @Rjected @Evalir crates/rpc/rpc-types-compat @mattsse @Rjected @Evalir From 132f7fbd80ba74da50734bd43b2ea9ab3f151155 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 25 Apr 2024 13:12:12 +0200 Subject: [PATCH 043/250] chore: remove `evalir` from `CODEOWNERS` (#7868) --- CODEOWNERS | 2 -- 1 file changed, 2 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index f6ff05a6e..155d8581d 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -10,8 +10,6 @@ crates/payload/ @mattsse @Rjected crates/prune @shekhirin @joshieDo crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected -crates/rpc/rpc-types @mattsse @Rjected @Evalir -crates/rpc/rpc-types-compat @mattsse @Rjected @Evalir crates/stages/ @onbjerg @rkrasiuk @shekhirin crates/stages-api/ @onbjerg @rkrasiuk @shekhirin crates/static-file @joshieDo @shekhirin From 0f7e3541b17e3ca70e33146d9b05b8f024ce6497 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 25 Apr 2024 13:36:53 +0200 Subject: [PATCH 044/250] chore: add `cli/*` to `CODEOWNERS` (#7870) --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index 155d8581d..3ea162bf6 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,6 +1,7 @@ * @gakonst bin/ @onbjerg crates/blockchain-tree @rakita @rkrasiuk +crates/cli/ @onbjerg @mattsse crates/consensus @rkrasiuk @mattsse @Rjected crates/exex @onbjerg @shekhirin crates/metrics @onbjerg From c7008deef8d7a86706673c25255cab795e6e3108 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 25 Apr 2024 13:47:27 +0200 Subject: [PATCH 045/250] fix(cli): set 
start header (#7725) Co-authored-by: Roman Krasiuk --- Cargo.lock | 8 ++++---- Makefile | 2 +- bin/reth/src/commands/import.rs | 20 ++++++++++++++++---- crates/net/downloaders/src/file_client.rs | 6 ------ 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 291638c49..90dbf1fa3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8934,18 +8934,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", diff --git a/Makefile b/Makefile index 187de174d..82994b3c2 100644 --- a/Makefile +++ b/Makefile @@ -316,7 +316,7 @@ lint: make fmt && \ make lint-reth && \ make lint-op-reth && \ - make lint-other-targets \ + make lint-other-targets && \ make lint-codespell fix-lint-reth: diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 4542f10be..0136e0e5e 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -29,7 +29,10 @@ use reth_node_core::init::init_genesis; use reth_node_ethereum::EthEvmConfig; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; -use reth_provider::{HeaderSyncMode, ProviderFactory, StageCheckpointReader}; +use reth_provider::{ + BlockNumReader, HeaderProvider, HeaderSyncMode, ProviderError, ProviderFactory, + 
StageCheckpointReader, +}; use reth_stages::{ prelude::*, stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, @@ -158,8 +161,7 @@ impl ImportCommand { "Importing chain file chunk" ); - // override the tip - let tip = file_client.tip().expect("file client has no tip"); + let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; info!(target: "reth::cli", "Chain file chunk read"); let (mut pipeline, events) = self @@ -221,15 +223,25 @@ impl ImportCommand { eyre::bail!("unable to import non canonical blocks"); } + // Retrieve latest header found in the database. + let last_block_number = provider_factory.last_block_number()?; + let local_head = provider_factory + .sealed_header(last_block_number)? + .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) .build(file_client.clone(), consensus.clone()) .into_task(); - header_downloader.update_local_head(file_client.start_header().unwrap()); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + header_downloader.update_local_head(local_head); header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) .build(file_client.clone(), consensus.clone(), provider_factory.clone()) .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. 
body_downloader .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) .expect("failed to set download range"); diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index b5b7aceae..ce8f3898b 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -179,12 +179,6 @@ impl FileClient { self.headers.get(&self.max_block()?).map(|h| h.clone().seal_slow()) } - /// Clones and returns the lowest header of this client has or `None` if empty. Seals header - /// before returning. - pub fn start_header(&self) -> Option { - self.headers.get(&self.min_block()?).map(|h| h.clone().seal_slow()) - } - /// Returns true if all blocks are canonical (no gaps) pub fn has_canonical_blocks(&self) -> bool { if self.headers.is_empty() { From f6e68e28eb0bea3d30b6722b2a8eb9b52d34c95e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 25 Apr 2024 14:23:51 +0200 Subject: [PATCH 046/250] feat(op): timestamp below bedrock (#7768) --- Cargo.lock | 1 - crates/primitives/Cargo.toml | 1 - crates/primitives/src/header.rs | 25 +++++++++++++++++-------- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 90dbf1fa3..1695adafd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7156,7 +7156,6 @@ dependencies = [ "byteorder", "bytes", "c-kzg", - "cfg-if", "clap", "criterion", "derive_more", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 3e08655db..e3828c913 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -51,7 +51,6 @@ tempfile = { workspace = true, optional = true } thiserror.workspace = true zstd = { version = "0.13", features = ["experimental"], optional = true } roaring = "0.10.2" -cfg-if = "1.0.0" # `test-utils` feature hash-db = { version = "~0.15", optional = true } diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 899fcb368..a06be2625 
100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -776,6 +776,17 @@ impl SealedHeader { } // timestamp in past check + #[cfg(feature = "optimism")] + if chain_spec.is_bedrock_active_at_block(self.header.number) && + self.header.is_timestamp_in_past(parent.timestamp) + { + return Err(HeaderValidationError::TimestampIsInPast { + parent_timestamp: parent.timestamp, + timestamp: self.timestamp, + }) + } + + #[cfg(not(feature = "optimism"))] if self.header.is_timestamp_in_past(parent.timestamp) { return Err(HeaderValidationError::TimestampIsInPast { parent_timestamp: parent.timestamp, @@ -786,16 +797,14 @@ impl SealedHeader { // TODO Check difficulty increment between parent and self // Ace age did increment it by some formula that we need to follow. - cfg_if::cfg_if! { - if #[cfg(feature = "optimism")] { - // On Optimism, the gas limit can adjust instantly, so we skip this check - // if the optimism feature is enabled in the chain spec. - if !chain_spec.is_optimism() { - self.validate_gas_limit(parent, chain_spec)?; - } - } else { + if cfg!(feature = "optimism") { + // On Optimism, the gas limit can adjust instantly, so we skip this check + // if the optimism feature is enabled in the chain spec. 
+ if !chain_spec.is_optimism() { self.validate_gas_limit(parent, chain_spec)?; } + } else { + self.validate_gas_limit(parent, chain_spec)?; } // EIP-1559 check base fee From 33e7e0208f25ef8d171a2a42adbceff73197bfd0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 14:25:54 +0200 Subject: [PATCH 047/250] fix: derank peers that responded with bad data (#7854) Co-authored-by: Oliver Nordbjerg --- crates/net/network/src/fetch/mod.rs | 76 +++++++++++++++++++++-------- 1 file changed, 57 insertions(+), 19 deletions(-) diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 19c605fb9..1f85f242d 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -77,8 +77,16 @@ impl StateFetcher { best_number: u64, timeout: Arc, ) { - self.peers - .insert(peer_id, Peer { state: PeerState::Idle, best_hash, best_number, timeout }); + self.peers.insert( + peer_id, + Peer { + state: PeerState::Idle, + best_hash, + best_number, + timeout, + last_response_likely_bad: false, + }, + ); } /// Removes the peer from the peer list, after which it is no longer available for future @@ -119,14 +127,29 @@ impl StateFetcher { } /// Returns the _next_ idle peer that's ready to accept a request, - /// prioritizing those with the lowest timeout/latency. - /// Once a peer has been yielded, it will be moved to the end of the map - fn next_peer(&mut self) -> Option { - self.peers - .iter() - .filter(|(_, peer)| peer.state.is_idle()) - .min_by_key(|(_, peer)| peer.timeout()) - .map(|(id, _)| *id) + /// prioritizing those with the lowest timeout/latency and those that recently responded with + /// adequate data. 
+ fn next_best_peer(&mut self) -> Option { + let mut idle = self.peers.iter().filter(|(_, peer)| peer.state.is_idle()); + + let mut best_peer = idle.next()?; + + for maybe_better in idle { + // replace best peer if our current best peer sent us a bad response last time + if best_peer.1.last_response_likely_bad && !maybe_better.1.last_response_likely_bad { + best_peer = maybe_better; + continue + } + + // replace best peer if this peer has better rtt + if maybe_better.1.timeout() < best_peer.1.timeout() && + !maybe_better.1.last_response_likely_bad + { + best_peer = maybe_better; + } + } + + Some(*best_peer.0) } /// Returns the next action to return @@ -136,7 +159,7 @@ impl StateFetcher { return PollAction::NoRequests } - let Some(peer_id) = self.next_peer() else { return PollAction::NoPeersAvailable }; + let Some(peer_id) = self.next_best_peer() else { return PollAction::NoPeersAvailable }; let request = self.queued_requests.pop_front().expect("not empty"); let request = self.prepare_block_request(peer_id, request); @@ -249,6 +272,9 @@ impl StateFetcher { } if let Some(peer) = self.peers.get_mut(&peer_id) { + // update the peer's response state + peer.last_response_likely_bad = is_likely_bad_response; + // If the peer is still ready to accept new requests, we try to send a followup // request immediately. 
if peer.state.on_request_finished() && !is_error && !is_likely_bad_response { @@ -268,11 +294,16 @@ impl StateFetcher { peer_id: PeerId, res: RequestResult>, ) -> Option { + let is_likely_bad_response = res.as_ref().map_or(true, |bodies| bodies.is_empty()); + if let Some(resp) = self.inflight_bodies_requests.remove(&peer_id) { let _ = resp.response.send(res.map(|b| (peer_id, b).into())); } if let Some(peer) = self.peers.get_mut(&peer_id) { - if peer.state.on_request_finished() { + // update the peer's response state + peer.last_response_likely_bad = is_likely_bad_response; + + if peer.state.on_request_finished() && !is_likely_bad_response { return self.followup_request(peer_id) } } @@ -307,6 +338,13 @@ struct Peer { best_number: u64, /// Tracks the current timeout value we use for the peer. timeout: Arc, + /// Tracks whether the peer has recently responded with a likely bad response. + /// + /// This is used to de-rank the peer if there are other peers available. + /// This exists because empty responses may not be penalized (e.g. when blocks near the tip are + /// downloaded), but we still want to avoid requesting from the same peer again if it has the + /// lowest timeout. 
+ last_response_likely_bad: bool, } impl Peer { @@ -462,17 +500,17 @@ mod tests { fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1))); fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1))); - let first_peer = fetcher.next_peer().unwrap(); + let first_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); // Pending disconnect for first_peer fetcher.on_pending_disconnect(&first_peer); // first_peer now isn't idle, so we should get other peer - let second_peer = fetcher.next_peer().unwrap(); + let second_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); assert_ne!(first_peer, second_peer); // without idle peers, returns None fetcher.on_pending_disconnect(&second_peer); - assert_eq!(fetcher.next_peer(), None); + assert_eq!(fetcher.next_best_peer(), None); } #[tokio::test] @@ -491,13 +529,13 @@ mod tests { fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50))); // Must always get peer1 (lowest timeout) - assert_eq!(fetcher.next_peer(), Some(peer1)); - assert_eq!(fetcher.next_peer(), Some(peer1)); + assert_eq!(fetcher.next_best_peer(), Some(peer1)); + assert_eq!(fetcher.next_best_peer(), Some(peer1)); // peer2's timeout changes below peer1's peer2_timeout.store(10, Ordering::Relaxed); // Then we get peer 2 always (now lowest) - assert_eq!(fetcher.next_peer(), Some(peer2)); - assert_eq!(fetcher.next_peer(), Some(peer2)); + assert_eq!(fetcher.next_best_peer(), Some(peer2)); + assert_eq!(fetcher.next_best_peer(), Some(peer2)); } #[tokio::test] From 08cdd67cb00776047d6260ff1ba48260a71940b4 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 25 Apr 2024 19:28:17 +0700 Subject: [PATCH 048/250] fix(auto-seal): build dev blocks with withdrawals (#7857) --- crates/consensus/auto-seal/src/lib.rs | 32 ++++++++++++++++++-------- crates/consensus/auto-seal/src/task.rs | 12 
+++++++--- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index f6de63979..c09dcbcc8 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -24,8 +24,8 @@ use reth_primitives::{ constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Bloom, - ChainSpec, Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, B256, - EMPTY_OMMER_ROOT_HASH, U256, + ChainSpec, Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, + B256, U256, }; use reth_provider::{ BlockExecutor, BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, @@ -270,6 +270,8 @@ impl StorageInner { pub(crate) fn build_header_template( &self, transactions: &[TransactionSigned], + ommers: &[Header], + withdrawals: Option<&Withdrawals>, chain_spec: Arc, ) -> Header { let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); @@ -281,12 +283,12 @@ impl StorageInner { let mut header = Header { parent_hash: self.best_hash, - ommers_hash: EMPTY_OMMER_ROOT_HASH, + ommers_hash: proofs::calculate_ommers_root(ommers), beneficiary: Default::default(), state_root: Default::default(), transactions_root: Default::default(), receipts_root: Default::default(), - withdrawals_root: None, + withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), logs_bloom: Default::default(), difficulty: U256::from(2), number: self.best_block + 1, @@ -420,6 +422,8 @@ impl StorageInner { pub(crate) fn build_and_execute( &mut self, transactions: Vec, + ommers: Vec
, + withdrawals: Option, client: &impl StateProviderFactory, chain_spec: Arc, evm_config: EvmConfig, @@ -427,11 +431,21 @@ impl StorageInner { where EvmConfig: ConfigureEvm, { - let header = self.build_header_template(&transactions, chain_spec.clone()); + let header = self.build_header_template( + &transactions, + &ommers, + withdrawals.as_ref(), + chain_spec.clone(), + ); - let block = Block { header, body: transactions, ommers: vec![], withdrawals: None } - .with_recovered_senders() - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; + let block = Block { + header, + body: transactions, + ommers: ommers.clone(), + withdrawals: withdrawals.clone(), + } + .with_recovered_senders() + .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); @@ -447,7 +461,7 @@ impl StorageInner { let (bundle_state, gas_used) = self.execute(&block, &mut executor)?; let Block { header, body, .. 
} = block.block; - let body = BlockBody { transactions: body, ommers: vec![], withdrawals: None }; + let body = BlockBody { transactions: body, ommers, withdrawals }; let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { let mut sum_blob_gas_used = 0; diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 53bfc6356..6009cd810 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -3,7 +3,9 @@ use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; -use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders}; +use reth_primitives::{ + Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders, Withdrawals, +}; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; @@ -134,9 +136,13 @@ where (recovered.into_signed(), signer) }) .unzip(); + let ommers = vec![]; + let withdrawals = Some(Withdrawals::default()); match storage.build_and_execute( transactions.clone(), + ommers.clone(), + withdrawals.clone(), &client, chain_spec, evm_config, @@ -193,8 +199,8 @@ where let block = Block { header: new_header.clone().unseal(), body: transactions, - ommers: vec![], - withdrawals: None, + ommers, + withdrawals, }; let sealed_block = block.seal_slow(); From 1c17f08ad209a1ae120436461ceb2f7f5391811c Mon Sep 17 00:00:00 2001 From: Rodrigo Herrera Date: Thu, 25 Apr 2024 06:50:04 -0600 Subject: [PATCH 049/250] Replace TransactionKind with alloy_primitives::TxKind (#7859) --- Cargo.lock | 4 +- Cargo.toml | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 4 +- crates/consensus/common/src/validation.rs | 8 +- .../interfaces/src/test_utils/generators.rs | 9 +- 
crates/net/eth-wire-types/src/blocks.rs | 12 +- crates/net/eth-wire-types/src/transactions.rs | 60 ++----- crates/net/network/tests/it/requests.rs | 6 +- crates/optimism/evm/src/execute.rs | 10 +- crates/optimism/node/src/txpool.rs | 6 +- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/revm/compat.rs | 4 +- crates/primitives/src/revm/env.rs | 24 +-- crates/primitives/src/transaction/eip1559.rs | 8 +- crates/primitives/src/transaction/eip2930.rs | 10 +- crates/primitives/src/transaction/eip4844.rs | 6 +- crates/primitives/src/transaction/legacy.rs | 8 +- crates/primitives/src/transaction/mod.rs | 158 +++--------------- crates/primitives/src/transaction/optimism.rs | 10 +- crates/revm/src/optimism/processor.rs | 10 +- crates/revm/src/processor.rs | 4 +- .../rpc-types-compat/src/transaction/mod.rs | 4 +- .../rpc-types-compat/src/transaction/typed.rs | 18 +- crates/rpc/rpc/src/eth/api/transactions.rs | 7 +- .../codecs/derive/src/compact/generator.rs | 7 +- .../storage/codecs/derive/src/compact/mod.rs | 2 +- crates/transaction-pool/src/test_utils/gen.rs | 10 +- .../transaction-pool/src/test_utils/mock.rs | 22 +-- crates/transaction-pool/src/traits.rs | 22 +-- 29 files changed, 162 insertions(+), 297 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1695adafd..6b2e0d017 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -262,9 +262,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99bbad0a6b588ef4aec1b5ddbbfdacd9ef04e00b979617765b03174318ee1f3a" +checksum = "50c715249705afa1e32be79dabfd35e2ef0f1cc02ad2cf48c9d1e20026ee637b" dependencies = [ "alloy-rlp", "arbitrary", diff --git a/Cargo.toml b/Cargo.toml index 7ef645f39..73597b311 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = # eth alloy-chains = "0.1.15" -alloy-primitives = "0.7.0" 
+alloy-primitives = "0.7.1" alloy-dyn-abi = "0.7.0" alloy-sol-types = "0.7.0" alloy-rlp = "0.3.4" diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 08f588cd2..b1688fda9 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1273,7 +1273,7 @@ mod tests { revm_primitives::AccountInfo, stage::StageCheckpoint, Account, Address, ChainSpecBuilder, Genesis, GenesisAccount, Header, Signature, - Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxKind, Withdrawals, B256, MAINNET, }; use reth_provider::{ @@ -1453,7 +1453,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce, gas_limit: 21_000, - to: TransactionKind::Call(Address::ZERO), + to: TxKind::Call(Address::ZERO), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 949061882..3ed01f637 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -331,8 +331,8 @@ mod tests { }; use reth_primitives::{ hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, Bytes, - ChainSpecBuilder, Signature, TransactionKind, TransactionSigned, Withdrawal, Withdrawals, - MAINNET, U256, + ChainSpecBuilder, Signature, TransactionSigned, TxKind, Withdrawal, Withdrawals, MAINNET, + U256, }; use std::ops::RangeBounds; @@ -448,7 +448,7 @@ mod tests { nonce, gas_price: 0x28f000fff, gas_limit: 10, - to: TransactionKind::Call(Address::default()), + to: TxKind::Call(Address::default()), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), access_list: Default::default(), @@ -470,7 +470,7 @@ mod tests { max_priority_fee_per_gas: 0x28f000fff, max_fee_per_blob_gas: 0x7, gas_limit: 10, - to: 
TransactionKind::Call(Address::default()), + to: TxKind::Call(Address::default()), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index e601d9629..0f1930b60 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -4,8 +4,7 @@ use rand::{ }; use reth_primitives::{ proofs, sign_message, Account, Address, BlockNumber, Bytes, Header, Log, Receipt, SealedBlock, - SealedHeader, StorageEntry, Transaction, TransactionKind, TransactionSigned, TxLegacy, B256, - U256, + SealedHeader, StorageEntry, Transaction, TransactionSigned, TxKind, TxLegacy, B256, U256, }; use secp256k1::{KeyPair, Secp256k1}; use std::{ @@ -79,7 +78,7 @@ pub fn random_tx(rng: &mut R) -> Transaction { nonce: rng.gen::().into(), gas_price: rng.gen::().into(), gas_limit: rng.gen::().into(), - to: TransactionKind::Call(rng.gen()), + to: TxKind::Call(rng.gen()), value: U256::from(rng.gen::()), input: Bytes::default(), }) @@ -395,7 +394,7 @@ mod tests { chain_id: 1, nonce: 0x42, gas_limit: 44386, - to: TransactionKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + to: TxKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), value: U256::from(0_u64), input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), max_fee_per_gas: 0x4a817c800, @@ -427,7 +426,7 @@ mod tests { nonce: 9, gas_price: 20 * 10_u128.pow(9), gas_limit: 21000, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(10_u128.pow(18)), input: Bytes::default(), }); diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index fa6365c20..36b8e6e8c 
100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -131,8 +131,8 @@ mod tests { use crate::{message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ - hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, TransactionKind, - TransactionSigned, TxLegacy, U256, + hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, + TransactionSigned, TxKind, TxLegacy, U256, }; use std::str::FromStr; @@ -383,7 +383,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -398,7 +398,7 @@ mod tests { nonce: 0x9u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), Signature { @@ -455,7 +455,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -471,7 +471,7 @@ mod tests { nonce: 0x9u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 5d48211be..f19bbdcc7 100644 --- 
a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -80,8 +80,8 @@ mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ - hex, PooledTransactionsElement, Signature, Transaction, TransactionKind, TransactionSigned, - TxEip1559, TxLegacy, U256, + hex, PooledTransactionsElement, Signature, Transaction, TransactionSigned, TxEip1559, + TxKind, TxLegacy, U256, }; use std::str::FromStr; @@ -130,9 +130,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -154,9 +152,7 @@ mod tests { nonce: 0x09u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), @@ -200,9 +196,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -224,9 +218,7 @@ mod tests { nonce: 0x09u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), @@ -271,9 +263,7 @@ mod tests { nonce: 15u64, gas_price: 2200000000, gas_limit: 34811u64, - to: TransactionKind::Call( - 
hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into(), - ), + to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()), value: U256::from(1234u64), input: Default::default(), }), @@ -296,9 +286,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000u64, - to: TransactionKind::Call( - hex!("61815774383099e24810ab832a5b2a5425c154d5").into(), - ), + to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), value: U256::from(3000000000000000000u64), input: Default::default(), access_list: Default::default(), @@ -321,9 +309,7 @@ mod tests { nonce: 3u64, gas_price: 2000000000, gas_limit: 10000000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -345,9 +331,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(693361000000000u64), input: Default::default(), }), @@ -369,9 +353,7 @@ mod tests { nonce: 2u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -420,9 +402,7 @@ mod tests { nonce: 15u64, gas_price: 2200000000, gas_limit: 34811u64, - to: TransactionKind::Call( - hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into(), - ), + to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()), value: U256::from(1234u64), input: Default::default(), }), @@ -445,9 +425,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 
1500000013, gas_limit: 21000u64, - to: TransactionKind::Call( - hex!("61815774383099e24810ab832a5b2a5425c154d5").into(), - ), + to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), value: U256::from(3000000000000000000u64), input: Default::default(), access_list: Default::default(), @@ -470,9 +448,7 @@ mod tests { nonce: 3u64, gas_price: 2000000000, gas_limit: 10000000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -494,9 +470,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(693361000000000u64), input: Default::default(), }), @@ -518,9 +492,7 @@ mod tests { nonce: 2u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index decc9ee25..4e36f191c 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -12,8 +12,8 @@ use reth_network::{ }; use reth_network_api::{NetworkInfo, Peers}; use reth_primitives::{ - Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionKind, - TransactionSigned, TxEip2930, U256, + Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionSigned, + TxEip2930, TxKind, U256, }; use reth_provider::test_utils::MockEthProvider; use std::sync::Arc; @@ -25,7 +25,7 @@ pub fn 
rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { nonce: rng.gen(), gas_price: rng.gen(), gas_limit: rng.gen(), - to: TransactionKind::Create, + to: TxKind::Create, value: U256::from(rng.gen::()), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index ef87cce1d..5b7d797da 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -539,7 +539,7 @@ mod tests { use super::*; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, - Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, + Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, }; use reth_revm::{database::StateProviderDatabase, L1_BLOCK_CONTRACT}; use std::{collections::HashMap, str::FromStr}; @@ -610,7 +610,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), ..Default::default() }), Signature::default(), @@ -619,7 +619,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), gas_limit: 21_000, ..Default::default() }), @@ -690,7 +690,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), ..Default::default() }), Signature::default(), @@ -699,7 +699,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), gas_limit: 21_000, ..Default::default() }), diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 73097ce27..7ee1bb9ec 100644 --- a/crates/optimism/node/src/txpool.rs 
+++ b/crates/optimism/node/src/txpool.rs @@ -202,8 +202,8 @@ pub struct OpL1BlockInfo { mod tests { use crate::txpool::OpTransactionValidator; use reth_primitives::{ - Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, - TxDeposit, MAINNET, U256, + Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxDeposit, TxKind, + MAINNET, U256, }; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ @@ -225,7 +225,7 @@ mod tests { let deposit_tx = Transaction::Deposit(TxDeposit { source_hash: Default::default(), from: signer, - to: TransactionKind::Create, + to: TxKind::Create, mint: None, value: U256::ZERO, gas_limit: 0u64, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 167a64545..9e77b4c15 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -97,7 +97,7 @@ pub use transaction::{ pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, AccessList, AccessListItem, IntoRecoveredTransaction, InvalidTransactionError, Signature, - Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, + Transaction, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHashOrNumber, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, @@ -113,7 +113,7 @@ pub use alloy_primitives::{ eip191_hash_message, hex, hex_literal, keccak256, ruint, utils::format_ether, Address, BlockHash, BlockNumber, Bloom, BloomInput, Bytes, ChainId, Selector, StorageKey, - StorageValue, TxHash, TxIndex, TxNumber, B128, B256, B512, B64, U128, U256, U64, U8, + StorageValue, TxHash, TxIndex, TxKind, TxNumber, B128, B256, B512, B64, U128, U256, U64, U8, }; pub use reth_ethereum_forks::*; pub use revm_primitives::{self, JumpMap}; diff --git 
a/crates/primitives/src/revm/compat.rs b/crates/primitives/src/revm/compat.rs index 6c9474f7c..972770882 100644 --- a/crates/primitives/src/revm/compat.rs +++ b/crates/primitives/src/revm/compat.rs @@ -1,4 +1,4 @@ -use crate::{revm_primitives::AccountInfo, Account, Address, TransactionKind, KECCAK_EMPTY, U256}; +use crate::{revm_primitives::AccountInfo, Account, Address, TxKind, KECCAK_EMPTY, U256}; use revm::{ interpreter::gas::validate_initial_tx_gas, primitives::{MergeSpec, ShanghaiSpec}, @@ -34,7 +34,7 @@ pub fn into_revm_acc(reth_acc: Account) -> AccountInfo { #[inline] pub fn calculate_intrinsic_gas_after_merge( input: &[u8], - kind: &TransactionKind, + kind: &TxKind, access_list: &[(Address, Vec)], is_shanghai: bool, ) -> u64 { diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index edfc07f80..b13a7018f 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -2,8 +2,8 @@ use crate::{ constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, recover_signer_unchecked, revm_primitives::{BlockEnv, Env, TransactTo, TxEnv}, - Address, Bytes, Chain, ChainSpec, Header, Transaction, TransactionKind, - TransactionSignedEcRecovered, B256, U256, + Address, Bytes, Chain, ChainSpec, Header, Transaction, TransactionSignedEcRecovered, TxKind, + B256, U256, }; #[cfg(feature = "optimism")] @@ -208,8 +208,8 @@ where tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -224,8 +224,8 @@ where tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + 
TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -247,8 +247,8 @@ where tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -270,8 +270,8 @@ where tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -295,8 +295,8 @@ where tx_env.gas_price = U256::ZERO; tx_env.gas_priority_fee = None; match tx.to { - TransactionKind::Call(to) => tx_env.transact_to = TransactTo::Call(to), - TransactionKind::Create => tx_env.transact_to = TransactTo::create(), + TxKind::Call(to) => tx_env.transact_to = TransactTo::Call(to), + TxKind::Create => tx_env.transact_to = TransactTo::create(), } tx_env.value = tx.value; tx_env.data = tx.input.clone(); diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 229da9983..68da7d8d9 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -1,5 +1,5 @@ use super::access_list::AccessList; -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use bytes::BytesMut; 
use reth_codecs::{main_codec, Compact}; @@ -41,7 +41,7 @@ pub struct TxEip1559 { pub max_priority_fee_per_gas: u128, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -226,7 +226,7 @@ impl TxEip1559 { mod tests { use super::TxEip1559; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, AccessList, Address, Transaction, TransactionSigned, B256, U256, }; use std::str::FromStr; @@ -243,7 +243,7 @@ mod tests { chain_id: 1, nonce: 0x42, gas_limit: 44386, - to: TransactionKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + to: TxKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), value: U256::ZERO, input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), max_fee_per_gas: 0x4a817c800, diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index fde594d7b..86794a512 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -1,5 +1,5 @@ use super::access_list::AccessList; -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use bytes::BytesMut; use reth_codecs::{main_codec, Compact}; @@ -29,7 +29,7 @@ pub struct TxEip2930 { pub gas_limit: u64, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member 
of B0 ; formally Tt. - pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -189,7 +189,7 @@ impl TxEip2930 { mod tests { use super::TxEip2930; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, Address, Bytes, Transaction, TransactionSigned, U256, }; use alloy_rlp::{Decodable, Encodable}; @@ -202,7 +202,7 @@ mod tests { nonce: 0, gas_price: 1, gas_limit: 2, - to: TransactionKind::Create, + to: TxKind::Create, value: U256::from(3), input: Bytes::from(vec![1, 2]), access_list: Default::default(), @@ -225,7 +225,7 @@ mod tests { nonce: 0, gas_price: 1, gas_limit: 2, - to: TransactionKind::Call(Address::default()), + to: TxKind::Call(Address::default()), value: U256::from(3), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index a24a87b11..0a3790abe 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -1,7 +1,7 @@ use super::access_list::AccessList; use crate::{ - constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Bytes, ChainId, Signature, TransactionKind, - TxType, B256, U256, + constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Bytes, ChainId, Signature, TxKind, TxType, + B256, U256, }; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use reth_codecs::{main_codec, Compact}; @@ -54,7 +54,7 @@ pub struct TxEip4844 { pub max_priority_fee_per_gas: u128, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. 
- pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index eba89f93d..f2440e13c 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -1,4 +1,4 @@ -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Encodable, Header}; use bytes::BytesMut; use reth_codecs::{main_codec, Compact}; @@ -28,7 +28,7 @@ pub struct TxLegacy { pub gas_limit: u64, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -173,7 +173,7 @@ impl TxLegacy { mod tests { use super::TxLegacy; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, Address, Transaction, TransactionSigned, B256, U256, }; @@ -190,7 +190,7 @@ mod tests { nonce: 0x18, gas_price: 0xfa56ea00, gas_limit: 119902, - to: TransactionKind::Call( hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), + to: TxKind::Call( hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), value: U256::from(0x1c6bf526340000u64), input: hex!("f7d8c88300000000000000000000000000000000000000000000000000000000000cee6100000000000000000000000000000000000000000000000000000000000ac3e1").into(), }); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index c2df13305..dae6ab076 100644 --- 
a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,6 +1,6 @@ #[cfg(any(feature = "arbitrary", feature = "zstd-codec"))] use crate::compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}; -use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, B256, U256}; +use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, TxKind, B256, U256}; use alloy_eips::eip2718::Eip2718Error; use alloy_rlp::{ @@ -176,9 +176,9 @@ impl Transaction { } } - /// Gets the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - pub fn kind(&self) -> &TransactionKind { + /// Gets the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + pub fn kind(&self) -> &TxKind { match self { Transaction::Legacy(TxLegacy { to, .. }) | Transaction::Eip2930(TxEip2930 { to, .. }) | @@ -194,7 +194,7 @@ impl Transaction { /// /// Returns `None` if this is a `CREATE` transaction. pub fn to(&self) -> Option
{ - self.kind().to() + self.kind().to().copied() } /// Get the transaction's type @@ -641,7 +641,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, input: tx.input, })) @@ -655,7 +655,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, input: tx.input, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, @@ -677,7 +677,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, @@ -698,7 +698,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, @@ -829,118 +829,6 @@ impl Encodable for Transaction { } } -/// Whether or not the transaction is a contract creation. -#[derive_arbitrary(compact, rlp)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] -pub enum TransactionKind { - /// A transaction that creates a contract. - #[default] - Create, - /// A transaction that calls a contract or transfer. 
- Call(Address), -} - -impl TransactionKind { - /// Returns the address of the contract that will be called or will receive the transfer. - pub fn to(self) -> Option
{ - match self { - TransactionKind::Create => None, - TransactionKind::Call(to) => Some(to), - } - } - - /// Returns true if the transaction is a contract creation. - #[inline] - pub fn is_create(self) -> bool { - matches!(self, TransactionKind::Create) - } - - /// Returns true if the transaction is a contract call. - #[inline] - pub fn is_call(self) -> bool { - matches!(self, TransactionKind::Call(_)) - } - - /// Calculates a heuristic for the in-memory size of the [TransactionKind]. - #[inline] - fn size(self) -> usize { - mem::size_of::() - } -} - -impl From for TransactionKind { - fn from(kind: reth_rpc_types::TransactionKind) -> Self { - match kind { - reth_rpc_types::TransactionKind::Call(to) => Self::Call(to), - reth_rpc_types::TransactionKind::Create => Self::Create, - } - } -} - -impl Compact for TransactionKind { - fn to_compact(self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - match self { - TransactionKind::Create => 0, - TransactionKind::Call(address) => { - address.to_compact(buf); - 1 - } - } - } - - fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { - match identifier { - 0 => (TransactionKind::Create, buf), - 1 => { - let (addr, buf) = Address::from_compact(buf, buf.len()); - (TransactionKind::Call(addr), buf) - } - _ => unreachable!("Junk data in database: unknown TransactionKind variant"), - } - } -} - -impl Encodable for TransactionKind { - /// This encodes the `to` field of a transaction request. 
- /// If the [TransactionKind] is a [TransactionKind::Call] it will encode the inner address: - /// `rlp(address)` - /// - /// If the [TransactionKind] is a [TransactionKind::Create] it will encode an empty list: - /// `rlp([])`, which is also - fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { - match self { - TransactionKind::Call(to) => to.encode(out), - TransactionKind::Create => out.put_u8(EMPTY_STRING_CODE), - } - } - - fn length(&self) -> usize { - match self { - TransactionKind::Call(to) => to.length(), - TransactionKind::Create => 1, // EMPTY_STRING_CODE is a single byte - } - } -} - -impl Decodable for TransactionKind { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - if let Some(&first) = buf.first() { - if first == EMPTY_STRING_CODE { - buf.advance(1); - Ok(TransactionKind::Create) - } else { - let addr =
::decode(buf)?; - Ok(TransactionKind::Call(addr)) - } - } else { - Err(RlpError::InputTooShort) - } - } -} - /// Signed transaction without its Hash. Used type for inserting into the DB. /// /// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. @@ -1856,10 +1744,10 @@ mod tests { use crate::{ hex, sign_message, transaction::{ - from_compact_zstd_unaware, signature::Signature, to_compact_ztd_unaware, - TransactionKind, TxEip1559, TxLegacy, MIN_LENGTH_EIP1559_TX_ENCODED, - MIN_LENGTH_EIP2930_TX_ENCODED, MIN_LENGTH_EIP4844_TX_ENCODED, - MIN_LENGTH_LEGACY_TX_ENCODED, PARALLEL_SENDER_RECOVERY_THRESHOLD, + from_compact_zstd_unaware, signature::Signature, to_compact_ztd_unaware, TxEip1559, + TxKind, TxLegacy, MIN_LENGTH_EIP1559_TX_ENCODED, MIN_LENGTH_EIP2930_TX_ENCODED, + MIN_LENGTH_EIP4844_TX_ENCODED, MIN_LENGTH_LEGACY_TX_ENCODED, + PARALLEL_SENDER_RECOVERY_THRESHOLD, }, Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip2930, TxEip4844, B256, U256, @@ -1881,13 +1769,13 @@ mod tests { fn raw_kind_encoding_sanity() { // check the 0x80 encoding for Create let mut buf = Vec::new(); - TransactionKind::Create.encode(&mut buf); + TxKind::Create.encode(&mut buf); assert_eq!(buf, vec![0x80]); // check decoding let buf = [0x80]; - let decoded = TransactionKind::decode(&mut &buf[..]).unwrap(); - assert_eq!(decoded, TransactionKind::Create); + let decoded = TxKind::decode(&mut &buf[..]).unwrap(); + assert_eq!(decoded, TxKind::Create); } #[test] @@ -1963,7 +1851,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( + to: TxKind::Call( Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), ), value: U256::from(1000000000000000u64), @@ -1985,7 +1873,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call(Address::from_slice( + to: TxKind::Call(Address::from_slice( 
&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], )), value: U256::from(693361000000000u64), @@ -2006,7 +1894,7 @@ mod tests { nonce: 3, gas_price: 2000000000, gas_limit: 10000000, - to: TransactionKind::Call(Address::from_slice( + to: TxKind::Call(Address::from_slice( &hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], )), value: U256::from(1000000000000000u64), @@ -2028,7 +1916,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000, - to: TransactionKind::Call(Address::from_slice( + to: TxKind::Call(Address::from_slice( &hex!("61815774383099e24810ab832a5b2a5425c154d5")[..], )), value: U256::from(3000000000000000000u64), @@ -2050,7 +1938,7 @@ mod tests { nonce: 15, gas_price: 2200000000, gas_limit: 34811, - to: TransactionKind::Call(Address::from_slice( + to: TxKind::Call(Address::from_slice( &hex!("cf7f9e66af820a19257a2108375b180b0ec49167")[..], )), value: U256::from(1234), @@ -2339,7 +2227,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( + to: TxKind::Call( Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), ), value: U256::from(1000000000000000u64), @@ -2388,7 +2276,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( + to: TxKind::Call( Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), ), value: U256::from(1000000000000000u64), diff --git a/crates/primitives/src/transaction/optimism.rs b/crates/primitives/src/transaction/optimism.rs index 0001347b5..f553f2aa6 100644 --- a/crates/primitives/src/transaction/optimism.rs +++ b/crates/primitives/src/transaction/optimism.rs @@ -1,4 +1,4 @@ -use crate::{Address, Bytes, TransactionKind, TxType, B256, U256}; +use crate::{Address, Bytes, TxKind, TxType, B256, U256}; use alloy_rlp::{ length_of_length, Decodable, Encodable, Error as DecodeError, Header, EMPTY_STRING_CODE, }; @@ -16,7 +16,7 @@ pub struct TxDeposit { pub from: Address, 
/// The address of the recipient account, or the null (zero-length) address if the deposited /// transaction is a contract creation. - pub to: TransactionKind, + pub to: TxKind, /// The ETH value to mint on L2. pub mint: Option, /// The ETH value to send to the recipient account. @@ -169,7 +169,7 @@ mod tests { let original = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, @@ -189,7 +189,7 @@ mod tests { let tx_deposit = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, @@ -211,7 +211,7 @@ mod tests { let tx_deposit = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index 78940c8b5..bd68023be 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -206,7 +206,7 @@ mod tests { }; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Header, Signature, StorageKey, - StorageValue, Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, + StorageValue, Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, }; use revm::L1_BLOCK_CONTRACT; use std::{collections::HashMap, str::FromStr, sync::Arc}; @@ -278,7 +278,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), ..Default::default() }), Signature::default(), @@ -287,7 +287,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + 
to: TxKind::Call(addr), gas_limit: 21_000, ..Default::default() }), @@ -352,7 +352,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), ..Default::default() }), Signature::default(), @@ -361,7 +361,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + to: TxKind::Call(addr), gas_limit: 21_000, ..Default::default() }), diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index f467b22a0..e6a85b77d 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -466,7 +466,7 @@ mod tests { bytes, constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, - TransactionKind, TxEip1559, MAINNET, + TxEip1559, TxKind, MAINNET, }; use revm::{Database, TransitionState}; use std::collections::HashMap; @@ -855,7 +855,7 @@ mod tests { chain_id, nonce: 1, gas_limit: 21_000, - to: TransactionKind::Call(Address::ZERO), + to: TxKind::Call(Address::ZERO), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 745d32e34..1004e93e2 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -2,8 +2,8 @@ use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; use reth_primitives::{ - BlockNumber, Transaction as PrimitiveTransaction, TransactionKind as PrimitiveTransactionKind, - TransactionSignedEcRecovered, TxType, B256, + BlockNumber, Transaction as PrimitiveTransaction, TransactionSignedEcRecovered, + TxKind as PrimitiveTransactionKind, TxType, B256, }; #[cfg(feature = "optimism")] use 
reth_rpc_types::optimism::OptimismTransactionFields; diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index 6b0ed5294..03f502a20 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -16,7 +16,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: tx.kind.into(), + to: to_primitive_transaction_kind(tx.kind), value: tx.value, input: tx.input, }), @@ -25,7 +25,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: tx.kind.into(), + to: to_primitive_transaction_kind(tx.kind), value: tx.value, input: tx.input, access_list: tx.access_list, @@ -35,7 +35,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, max_fee_per_gas: tx.max_fee_per_gas.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: tx.kind.into(), + to: to_primitive_transaction_kind(tx.kind), value: tx.value, input: tx.input, access_list: tx.access_list, @@ -47,7 +47,7 @@ pub fn to_primitive_transaction( gas_limit: tx.gas_limit.to(), max_fee_per_gas: tx.max_fee_per_gas.to(), max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), - to: tx.kind.into(), + to: to_primitive_transaction_kind(tx.kind), value: tx.value, access_list: tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, @@ -56,3 +56,13 @@ pub fn to_primitive_transaction( }), }) } + +/// Transforms a [reth_rpc_types::TransactionKind] into a [reth_primitives::TxKind] +pub fn to_primitive_transaction_kind( + kind: reth_rpc_types::TransactionKind, +) -> reth_primitives::TxKind { + match kind { + reth_rpc_types::TransactionKind::Call(to) => reth_primitives::TxKind::Call(to), + reth_rpc_types::TransactionKind::Create => reth_primitives::TxKind::Create, + } +} diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs 
b/crates/rpc/rpc/src/eth/api/transactions.rs index 43a75b68b..2188b8d25 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -15,9 +15,10 @@ use reth_primitives::{ eip4844::calc_blob_gasprice, revm::env::{fill_block_env_with_coinbase, tx_env_with_recovered}, Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredPooledTransaction, Header, - IntoRecoveredTransaction, Receipt, SealedBlock, SealedBlockWithSenders, - TransactionKind::{Call, Create}, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, B256, U256, + IntoRecoveredTransaction, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, + TxKind::{Call, Create}, + B256, U256, }; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 8cd9070bb..03dab1a14 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -58,12 +58,7 @@ fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> To // it's hard to figure out with derive_macro which types have Bytes fields. // // This removes the requirement of the field to be placed last in the struct. 
- known_types.extend_from_slice(&[ - "TransactionKind", - "AccessList", - "Signature", - "CheckpointBlockRange", - ]); + known_types.extend_from_slice(&["TxKind", "AccessList", "Signature", "CheckpointBlockRange"]); // let mut handle = FieldListHandler::new(fields); let is_enum = fields.iter().any(|field| matches!(field, FieldTypes::EnumVariant(_))); diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index abc785edd..7614fa832 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -161,7 +161,7 @@ fn should_use_alt_impl(ftype: &String, segment: &syn::PathSegment) -> bool { /// length. pub fn get_bit_size(ftype: &str) -> u8 { match ftype { - "TransactionKind" | "bool" | "Option" | "Signature" => 1, + "TransactionKind" | "TxKind" | "bool" | "Option" | "Signature" => 1, "TxType" => 2, "u64" | "BlockNumber" | "TxNumber" | "ChainId" | "NumTransactions" => 4, "u128" => 5, diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 52a3127c7..2e3c71828 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -2,8 +2,8 @@ use crate::EthPooledTransaction; use rand::Rng; use reth_primitives::{ constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Address, Bytes, Transaction, - TransactionKind, TransactionSigned, TryFromRecoveredTransaction, TxEip1559, TxEip4844, - TxLegacy, B256, MAINNET, U256, + TransactionSigned, TryFromRecoveredTransaction, TxEip1559, TxEip4844, TxKind, TxLegacy, B256, + MAINNET, U256, }; /// A generator for transactions for testing purposes. @@ -129,7 +129,7 @@ pub struct TransactionBuilder { /// processing. pub max_priority_fee_per_gas: u128, /// The recipient or contract address of the transaction. - pub to: TransactionKind, + pub to: TxKind, /// The value to be transferred in the transaction. 
pub value: U256, /// The list of addresses and storage keys that the transaction can access. @@ -246,7 +246,7 @@ impl TransactionBuilder { /// Sets the recipient or contract address for the transaction builder. pub const fn to(mut self, to: Address) -> Self { - self.to = TransactionKind::Call(to); + self.to = TxKind::Call(to); self } @@ -306,7 +306,7 @@ impl TransactionBuilder { /// Sets the recipient or contract address for the transaction, mutable reference version. pub fn set_to(&mut self, to: Address) -> &mut Self { - self.to = TransactionKind::Call(to); + self.to = TxKind::Call(to); self } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index d250b6c10..7eda40e58 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -16,8 +16,8 @@ use reth_primitives::{ transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, Bytes, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, - TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, - TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID, + TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip1559, + TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -105,7 +105,7 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The transaction input data. @@ -128,7 +128,7 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. 
- to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The access list associated with the transaction. @@ -155,7 +155,7 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The access list associated with the transaction. @@ -176,7 +176,7 @@ pub enum MockTransaction { /// The transaction nonce. nonce: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The gas limit for the transaction. gas_limit: u64, /// The transaction input data. @@ -213,7 +213,7 @@ impl MockTransaction { nonce: 0, gas_price: 0, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: TxKind::Call(Address::random()), value: Default::default(), input: Default::default(), size: Default::default(), @@ -229,7 +229,7 @@ impl MockTransaction { max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: TxKind::Call(Address::random()), value: Default::default(), input: Bytes::new(), accesslist: Default::default(), @@ -247,7 +247,7 @@ impl MockTransaction { max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: TxKind::Call(Address::random()), value: Default::default(), input: Bytes::new(), accesslist: Default::default(), @@ -272,7 +272,7 @@ impl MockTransaction { hash: B256::random(), sender: Address::random(), nonce: 0, - to: TransactionKind::Call(Address::random()), + to: TxKind::Call(Address::random()), gas_limit: 0, input: Bytes::new(), value: Default::default(), @@ -671,7 +671,7 @@ impl PoolTransaction for MockTransaction { } /// Returns the transaction kind associated with the transaction. 
- fn kind(&self) -> &TransactionKind { + fn kind(&self) -> &TxKind { match self { MockTransaction::Legacy { to, .. } | MockTransaction::Eip1559 { to, .. } | diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 459c0bf10..c5603ec7b 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -13,9 +13,9 @@ use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PeerId, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionKind, - TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip4844, TxHash, B256, - EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, + PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, + TryFromRecoveredTransaction, TxEip4844, TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, + EIP4844_TX_TYPE_ID, U256, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -811,14 +811,14 @@ pub trait PoolTransaction: /// otherwise returns the gas price. fn priority_fee_or_price(&self) -> u128; - /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> &TransactionKind; + /// Returns the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + fn kind(&self) -> &TxKind; - /// Returns the recipient of the transaction if it is not a [TransactionKind::Create] + /// Returns the recipient of the transaction if it is not a [TxKind::Create] /// transaction. fn to(&self) -> Option
{ - (*self.kind()).to() + (*self.kind()).to().copied() } /// Returns the input data of this transaction. @@ -1056,9 +1056,9 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.priority_fee_or_price() } - /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> &TransactionKind { + /// Returns the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + fn kind(&self) -> &TxKind { self.transaction.kind() } From 79235a74fa5e4260e83a2ca63b74148cc1a29592 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 25 Apr 2024 15:14:20 +0200 Subject: [PATCH 050/250] chore: add `node-*` crates to `CODEOWNERS` (#7869) --- CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CODEOWNERS b/CODEOWNERS index 3ea162bf6..be8243ea2 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -7,6 +7,9 @@ crates/exex @onbjerg @shekhirin crates/metrics @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk +crates/node-builder/ @mattsse @Rjected @onbjerg +crates/node-core/ @mattsse @Rjected @onbjerg +crates/node-ethereum/ @mattsse @Rjected crates/payload/ @mattsse @Rjected crates/prune @shekhirin @joshieDo crates/revm/ @mattsse @rakita From ed45c3c10d832eb0d6ad0ccc7c2c33d380c5d16f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 15:37:55 +0200 Subject: [PATCH 051/250] chore: some touchups (#7873) --- crates/payload/optimism/src/builder.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/crates/payload/optimism/src/builder.rs b/crates/payload/optimism/src/builder.rs index 1d1a2dade..7d8efa689 100644 --- a/crates/payload/optimism/src/builder.rs +++ b/crates/payload/optimism/src/builder.rs @@ -250,13 +250,15 @@ where } = config; debug!(target: "payload_builder", 
id=%attributes.payload_attributes.payload_id(), parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + let mut cumulative_gas_used = 0; let block_gas_limit: u64 = attributes .gas_limit .unwrap_or_else(|| initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX)); let base_fee = initialized_block_env.basefee.to::(); - let mut executed_txs = Vec::new(); + let mut executed_txs = Vec::with_capacity(attributes.transactions.len()); + let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( base_fee, initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), @@ -288,11 +290,12 @@ where attributes.payload_attributes.timestamp, &mut db, ) - .map_err(|_| { + .map_err(|err| { + warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); PayloadBuilderError::other(OptimismPayloadBuilderError::ForceCreate2DeployerFail) })?; - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(attributes.transactions.len()); for sequencer_tx in &attributes.transactions { // Check if the job was cancelled, if so we can exit early. if cancel.is_cancelled() { @@ -300,7 +303,7 @@ where } // A sequencer's block should never contain blob transactions. - if matches!(sequencer_tx.tx_type(), TxType::Eip4844) { + if sequencer_tx.is_eip4844() { return Err(PayloadBuilderError::other( OptimismPayloadBuilderError::BlobTransactionRejected, )) @@ -398,11 +401,9 @@ where continue } - // A sequencer's block should never contain blob transactions. - if pool_tx.tx_type() == TxType::Eip4844 as u8 { - return Err(PayloadBuilderError::other( - OptimismPayloadBuilderError::BlobTransactionRejected, - )) + // A sequencer's block should never contain blob or deposit transactions from the pool. 
+ if pool_tx.is_eip4844() || pool_tx.tx_type() == TxType::Deposit as u8 { + best_txs.mark_invalid(&pool_tx) } // check if the job was cancelled, if so we can exit early From 421888d22f038b0d304ac83efd600070f6c84ba4 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 25 Apr 2024 15:55:37 +0200 Subject: [PATCH 052/250] fix: log actual notification id in exex manager (#7874) --- crates/exex/src/manager.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 95b950f32..1c9eaf9ef 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -280,12 +280,16 @@ impl Future for ExExManager { // it is a logic error for this to ever underflow since the manager manages the // notification IDs - let notification_id = exex + let notification_index = exex .next_notification_id .checked_sub(self.min_id) .expect("exex expected notification ID outside the manager's range"); - if let Some(notification) = self.buffer.get(notification_id) { - debug!(exex.id, notification_id, "sent notification to exex"); + if let Some(notification) = self.buffer.get(notification_index) { + debug!( + exex.id, + notification_id = exex.next_notification_id, + "sent notification to exex" + ); if let Poll::Ready(Err(err)) = exex.send(cx, notification) { // the channel was closed, which is irrecoverable for the manager return Poll::Ready(Err(err.into())) From 6f22621f4375cc58176f7ddef9182c9439e54153 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 25 Apr 2024 21:56:04 +0800 Subject: [PATCH 053/250] chore: rename BlockChain to Blockchain (#7840) Signed-off-by: jsvisa --- crates/blockchain-tree/src/block_indices.rs | 20 ++++++------- crates/blockchain-tree/src/blockchain_tree.rs | 30 +++++++++---------- crates/blockchain-tree/src/state.rs | 18 +++++------ crates/consensus/beacon/src/engine/mod.rs | 4 +-- .../interfaces/src/blockchain_tree/error.rs | 2 +- .../storage/provider/src/test_utils/blocks.rs | 6 
++-- 6 files changed, 40 insertions(+), 40 deletions(-) diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index a262148b9..373b419b3 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -1,6 +1,6 @@ //! Implementation of [`BlockIndices`] related to [`super::BlockchainTree`] -use super::state::BlockChainId; +use super::state::BlockchainId; use crate::canonical_chain::CanonicalChain; use linked_hash_set::LinkedHashSet; use reth_primitives::{BlockHash, BlockNumHash, BlockNumber, SealedBlockWithSenders}; @@ -39,7 +39,7 @@ pub struct BlockIndices { /// hashes. block_number_to_block_hashes: BTreeMap>, /// Block hashes and side chain they belong - blocks_to_chain: HashMap, + blocks_to_chain: HashMap, } impl BlockIndices { @@ -71,7 +71,7 @@ impl BlockIndices { } /// Return block to chain id - pub fn blocks_to_chain(&self) -> &HashMap { + pub fn blocks_to_chain(&self) -> &HashMap { &self.blocks_to_chain } @@ -119,14 +119,14 @@ impl BlockIndices { &mut self, block_number: BlockNumber, block_hash: BlockHash, - chain_id: BlockChainId, + chain_id: BlockchainId, ) { self.block_number_to_block_hashes.entry(block_number).or_default().insert(block_hash); self.blocks_to_chain.insert(block_hash, chain_id); } /// Insert block to chain and fork child indices of the new chain - pub(crate) fn insert_chain(&mut self, chain_id: BlockChainId, chain: &Chain) { + pub(crate) fn insert_chain(&mut self, chain_id: BlockchainId, chain: &Chain) { for (number, block) in chain.blocks().iter() { // add block -> chain_id index self.blocks_to_chain.insert(block.hash(), chain_id); @@ -139,7 +139,7 @@ impl BlockIndices { } /// Get the chain ID the block belongs to - pub(crate) fn get_blocks_chain_id(&self, block: &BlockHash) -> Option { + pub(crate) fn get_blocks_chain_id(&self, block: &BlockHash) -> Option { self.blocks_to_chain.get(block).cloned() } @@ -149,7 +149,7 @@ impl BlockIndices { 
pub(crate) fn update_block_hashes( &mut self, hashes: BTreeMap, - ) -> (BTreeSet, Vec) { + ) -> (BTreeSet, Vec) { // set new canonical hashes. self.canonical_chain.replace(hashes.clone()); @@ -218,7 +218,7 @@ impl BlockIndices { /// Remove chain from indices and return dependent chains that need to be removed. /// Does the cleaning of the tree and removing blocks from the chain. - pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { + pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { chain .blocks() .iter() @@ -234,7 +234,7 @@ impl BlockIndices { &mut self, block_number: BlockNumber, block_hash: BlockHash, - ) -> BTreeSet { + ) -> BTreeSet { // rm number -> block if let btree_map::Entry::Occupied(mut entry) = self.block_number_to_block_hashes.entry(block_number) @@ -327,7 +327,7 @@ impl BlockIndices { &mut self, finalized_block: BlockNumber, num_of_additional_canonical_hashes_to_retain: u64, - ) -> BTreeSet { + ) -> BTreeSet { // get finalized chains. blocks between [self.last_finalized,finalized_block). // Dont remove finalized_block, as sidechain can point to it. 
let finalized_blocks: Vec = self diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index b1688fda9..b98cc664a 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -2,7 +2,7 @@ use crate::{ metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, - state::{BlockChainId, TreeState}, + state::{BlockchainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, BundleStateData, TreeExternals, }; use reth_consensus::{Consensus, ConsensusError}; @@ -441,7 +441,7 @@ where fn try_insert_block_into_side_chain( &mut self, block: SealedBlockWithSenders, - chain_id: BlockChainId, + chain_id: BlockchainId, block_validation_kind: BlockValidationKind, ) -> Result { let block_num_hash = block.num_hash(); @@ -514,7 +514,7 @@ where /// # Note /// /// This is not cached in order to save memory. - fn all_chain_hashes(&self, chain_id: BlockChainId) -> BTreeMap { + fn all_chain_hashes(&self, chain_id: BlockchainId) -> BTreeMap { let mut chain_id = chain_id; let mut hashes = BTreeMap::new(); loop { @@ -553,7 +553,7 @@ where /// the block on /// /// Returns `None` if the chain is unknown. - fn canonical_fork(&self, chain_id: BlockChainId) -> Option { + fn canonical_fork(&self, chain_id: BlockchainId) -> Option { let mut chain_id = chain_id; let mut fork; loop { @@ -572,13 +572,13 @@ where /// Insert a chain into the tree. /// /// Inserts a chain into the tree and builds the block indices. - fn insert_chain(&mut self, chain: AppendableChain) -> Option { + fn insert_chain(&mut self, chain: AppendableChain) -> Option { self.state.insert_chain(chain) } /// Iterate over all child chains that depend on this block and return /// their ids. - fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet { + fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet { // Find all forks of given block. 
let mut dependent_block = self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default(); @@ -609,7 +609,7 @@ where /// This method searches for any chain that depended on this block being part of the canonical /// chain. Each dependent chain's state is then updated with state entries removed from the /// plain state during the unwind. - fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { + fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { // iterate over all blocks in chain and find any fork blocks that are in tree. for (number, block) in chain.blocks().iter() { let hash = block.hash(); @@ -893,7 +893,7 @@ where /// The pending part of the chain is reinserted back into the tree with the same `chain_id`. fn remove_and_split_chain( &mut self, - chain_id: BlockChainId, + chain_id: BlockchainId, split_at: ChainSplitTarget, ) -> Option { let chain = self.state.chains.remove(&chain_id)?; @@ -1278,7 +1278,7 @@ mod tests { }; use reth_provider::{ test_utils::{ - blocks::BlockChainTestData, create_test_provider_factory_with_chain_spec, + blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, TestExecutorFactory, }, ProviderFactory, @@ -1339,7 +1339,7 @@ mod tests { /// Number of chains chain_num: Option, /// Check block to chain index - block_to_chain: Option>, + block_to_chain: Option>, /// Check fork to child index fork_to_child: Option>>, /// Pending blocks @@ -1354,7 +1354,7 @@ mod tests { self } - fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { + fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { self.block_to_chain = Some(block_to_chain); self } @@ -1602,7 +1602,7 @@ mod tests { #[test] fn sidechain_block_hashes() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let (block3, exec3) = 
data.blocks[2].clone(); @@ -1678,7 +1678,7 @@ mod tests { #[test] fn cached_trie_updates() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let (block3, exec3) = data.blocks[2].clone(); @@ -1766,7 +1766,7 @@ mod tests { #[test] fn test_side_chain_fork() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let genesis = data.genesis; @@ -1864,7 +1864,7 @@ mod tests { #[test] fn sanity_path() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let genesis = data.genesis; diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index f741df8ec..5013be8c1 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -10,7 +10,7 @@ pub(crate) struct TreeState { /// Keeps track of new unique identifiers for chains block_chain_id_generator: u64, /// The tracked chains and their current data. - pub(crate) chains: HashMap, + pub(crate) chains: HashMap, /// Indices to block and their connection to the canonical chain. /// /// This gets modified by the tree itself and is read from engine API/RPC to access the pending @@ -41,10 +41,10 @@ impl TreeState { /// Issues a new unique identifier for a new chain. #[inline] - fn next_id(&mut self) -> BlockChainId { + fn next_id(&mut self) -> BlockchainId { let id = self.block_chain_id_generator; self.block_chain_id_generator += 1; - BlockChainId(id) + BlockchainId(id) } /// Expose internal indices of the BlockchainTree. @@ -85,7 +85,7 @@ impl TreeState { /// Insert a chain into the tree. 
/// /// Inserts a chain into the tree and builds the block indices. - pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option { + pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option { if chain.is_empty() { return None } @@ -113,17 +113,17 @@ impl TreeState { /// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree]. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub struct BlockChainId(u64); +pub struct BlockchainId(u64); -impl From for u64 { - fn from(value: BlockChainId) -> Self { +impl From for u64 { + fn from(value: BlockchainId) -> Self { value.0 } } #[cfg(test)] -impl From for BlockChainId { +impl From for BlockchainId { fn from(value: u64) -> Self { - BlockChainId(value) + BlockchainId(value) } } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 5e22a4860..72fc97297 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -2354,7 +2354,7 @@ mod tests { genesis::{Genesis, GenesisAllocator}, Hardfork, U256, }; - use reth_provider::test_utils::blocks::BlockChainTestData; + use reth_provider::test_utils::blocks::BlockchainTestData; #[tokio::test] async fn new_payload_before_forkchoice() { @@ -2569,7 +2569,7 @@ mod tests { #[tokio::test] async fn payload_pre_merge() { - let data = BlockChainTestData::default(); + let data = BlockchainTestData::default(); let mut block1 = data.blocks[0].0.block.clone(); block1 .header diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index 44f1f50bc..b63698576 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -18,7 +18,7 @@ pub enum BlockchainTreeError { last_finalized: BlockNumber, }, /// Thrown if no side chain could be found for the block. 
- #[error("blockChainId can't be found in BlockchainTree with internal index {chain_id}")] + #[error("chainId can't be found in BlockchainTree with internal index {chain_id}")] BlockSideChainIdConsistency { /// The internal identifier for the side chain. chain_id: u64, diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 39b6d3535..32ecb4897 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -62,14 +62,14 @@ const BLOCK_RLP: [u8; 610] = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd1 /// Test chain with genesis, blocks, execution results /// that have valid changesets. #[derive(Debug)] -pub struct BlockChainTestData { +pub struct BlockchainTestData { /// Genesis pub genesis: SealedBlock, /// Blocks with its execution result pub blocks: Vec<(SealedBlockWithSenders, BundleStateWithReceipts)>, } -impl BlockChainTestData { +impl BlockchainTestData { /// Create test data with two blocks that are connected, specifying their block numbers. 
pub fn default_from_number(first: BlockNumber) -> Self { let one = block1(first); @@ -85,7 +85,7 @@ impl BlockChainTestData { } } -impl Default for BlockChainTestData { +impl Default for BlockchainTestData { fn default() -> Self { let one = block1(1); let mut extended_state = one.1.clone(); From 29e5df81a46a476110116f65d0d4757391201968 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 15:56:42 +0200 Subject: [PATCH 054/250] chore: bidirectional eq for TxType (#7876) --- crates/primitives/src/transaction/tx_type.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 84a099cb7..11df417d4 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -85,17 +85,17 @@ impl TryFrom for TxType { fn try_from(value: u8) -> Result { #[cfg(feature = "optimism")] - if value == TxType::Deposit as u8 { + if value == TxType::Deposit { return Ok(TxType::Deposit) } - if value == TxType::Legacy as u8 { + if value == TxType::Legacy { return Ok(TxType::Legacy) - } else if value == TxType::Eip2930 as u8 { + } else if value == TxType::Eip2930 { return Ok(TxType::Eip2930) - } else if value == TxType::Eip1559 as u8 { + } else if value == TxType::Eip1559 { return Ok(TxType::Eip1559) - } else if value == TxType::Eip4844 as u8 { + } else if value == TxType::Eip4844 { return Ok(TxType::Eip4844) } @@ -175,6 +175,12 @@ impl PartialEq for TxType { } } +impl PartialEq for u8 { + fn eq(&self, other: &TxType) -> bool { + *self == *other as u8 + } +} + #[cfg(test)] mod tests { use super::*; From 1c81fae4d1acd73c21577ec593f6a1f15a3b07b9 Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:12:30 +0200 Subject: [PATCH 055/250] refactor: rename some examples (#7881) --- Cargo.lock | 96 +++++++++---------- Cargo.toml | 8 +- 
examples/README.md | 10 +- .../Cargo.toml | 2 +- .../src/main.rs | 0 .../Cargo.toml | 2 +- .../src/main.rs | 2 +- .../Cargo.toml | 2 +- .../src/main.rs | 2 +- .../Cargo.toml | 2 +- .../src/main.rs | 2 +- 11 files changed, 64 insertions(+), 64 deletions(-) rename examples/{custom-node => custom-engine-types}/Cargo.toml (95%) rename examples/{custom-node => custom-engine-types}/src/main.rs (100%) rename examples/{additional-rpc-namespace-in-cli => node-custom-rpc}/Cargo.toml (90%) rename examples/{additional-rpc-namespace-in-cli => node-custom-rpc}/src/main.rs (97%) rename examples/{cli-extension-event-hooks => node-event-hooks}/Cargo.toml (82%) rename examples/{cli-extension-event-hooks => node-event-hooks}/src/main.rs (95%) rename examples/{trace-transaction-cli => txpool-tracing}/Cargo.toml (88%) rename examples/{trace-transaction-cli => txpool-tracing}/src/main.rs (96%) diff --git a/Cargo.lock b/Cargo.lock index 6b2e0d017..343697508 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,19 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "additional-rpc-namespace-in-cli" -version = "0.0.0" -dependencies = [ - "clap", - "eyre", - "jsonrpsee", - "reth", - "reth-node-ethereum", - "reth-transaction-pool", - "tokio", -] - [[package]] name = "addr2line" version = "0.21.0" @@ -1564,14 +1551,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" -[[package]] -name = "cli-extension-event-hooks" -version = "0.0.0" -dependencies = [ - "reth", - "reth-node-ethereum", -] - [[package]] name = "coins-bip32" version = "0.8.7" @@ -2010,47 +1989,47 @@ dependencies = [ ] [[package]] -name = "custom-evm" +name = "custom-engine-types" version = "0.0.0" dependencies = [ "eyre", "reth", + "reth-basic-payload-builder", + "reth-ethereum-payload-builder", "reth-node-api", "reth-node-core", "reth-node-ethereum", + "reth-payload-builder", "reth-primitives", + "reth-rpc-types", "reth-tracing", + "serde", + "thiserror", "tokio", ] [[package]] -name = "custom-inspector" +name = "custom-evm" version = "0.0.0" dependencies = [ - "clap", - "futures-util", + "eyre", "reth", + "reth-node-api", + "reth-node-core", "reth-node-ethereum", + "reth-primitives", + "reth-tracing", + "tokio", ] [[package]] -name = "custom-node" +name = "custom-inspector" version = "0.0.0" dependencies = [ - "eyre", + "clap", + "futures-util", "reth", - "reth-basic-payload-builder", - "reth-ethereum-payload-builder", - "reth-node-api", - "reth-node-core", "reth-node-ethereum", - "reth-payload-builder", - "reth-primitives", - "reth-rpc-types", - "reth-tracing", - "serde", - "thiserror", - "tokio", ] [[package]] @@ -4904,6 +4883,27 @@ dependencies = [ "libc", ] +[[package]] +name = "node-custom-rpc" +version = "0.0.0" +dependencies = [ + "clap", + "eyre", + "jsonrpsee", + "reth", + "reth-node-ethereum", + "reth-transaction-pool", + "tokio", +] + +[[package]] +name = "node-event-hooks" +version = "0.0.0" +dependencies = [ 
+ "reth", + "reth-node-ethereum", +] + [[package]] name = "nom" version = "7.1.3" @@ -9277,16 +9277,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" -[[package]] -name = "trace-transaction-cli" -version = "0.0.0" -dependencies = [ - "clap", - "futures-util", - "reth", - "reth-node-ethereum", -] - [[package]] name = "tracing" version = "0.1.40" @@ -9531,6 +9521,16 @@ dependencies = [ "toml", ] +[[package]] +name = "txpool-tracing" +version = "0.0.0" +dependencies = [ + "clap", + "futures-util", + "reth", + "reth-node-ethereum", +] + [[package]] name = "typenum" version = "1.17.0" diff --git a/Cargo.toml b/Cargo.toml index 73597b311..7b9b63e95 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,17 +71,17 @@ members = [ "crates/trie/", "crates/trie-parallel/", "examples/", - "examples/additional-rpc-namespace-in-cli/", + "examples/node-custom-rpc/", "examples/beacon-api-sse/", - "examples/cli-extension-event-hooks/", + "examples/node-event-hooks/", "examples/custom-evm/", - "examples/custom-node/", + "examples/custom-engine-types/", "examples/custom-node-components/", "examples/custom-dev-node/", "examples/custom-payload-builder/", "examples/manual-p2p/", "examples/rpc-db/", - "examples/trace-transaction-cli/", + "examples/txpool-tracing/", "examples/polygon-p2p/", "examples/custom-inspector/", "examples/exex/minimal/", diff --git a/examples/README.md b/examples/README.md index 791851a46..db0bdb999 100644 --- a/examples/README.md +++ b/examples/README.md @@ -11,13 +11,13 @@ to make a PR! 
## Node Builder | Example | Description | -| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | -| [Additional RPC namespace](./additional-rpc-namespace-in-cli) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | -| [Custom event hooks](./cli-extension-event-hooks) | Illustrates how to hook to various node lifecycle events | +|---------------------------------------------------------------| ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | | [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | | [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | | [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | -| [Custom node](./custom-node) | Illustrates how to create a node with custom engine types | +| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | | [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | | [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | @@ -50,7 +50,7 @@ to make a PR! 
| Example | Description | | ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | -| [Trace pending transactions](./trace-transaction-cli) | Illustrates how to trace pending transactions as they arrive in the mempool | +| [Trace pending transactions](./txpool-tracing) | Illustrates how to trace pending transactions as they arrive in the mempool | | [Standalone txpool](./network-txpool.rs) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | ## P2P diff --git a/examples/custom-node/Cargo.toml b/examples/custom-engine-types/Cargo.toml similarity index 95% rename from examples/custom-node/Cargo.toml rename to examples/custom-engine-types/Cargo.toml index 9d41edafd..738631306 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-node" +name = "custom-engine-types" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/custom-node/src/main.rs b/examples/custom-engine-types/src/main.rs similarity index 100% rename from examples/custom-node/src/main.rs rename to examples/custom-engine-types/src/main.rs diff --git a/examples/additional-rpc-namespace-in-cli/Cargo.toml b/examples/node-custom-rpc/Cargo.toml similarity index 90% rename from examples/additional-rpc-namespace-in-cli/Cargo.toml rename to examples/node-custom-rpc/Cargo.toml index 960dd86d0..f1c5d95d9 100644 --- a/examples/additional-rpc-namespace-in-cli/Cargo.toml +++ b/examples/node-custom-rpc/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "additional-rpc-namespace-in-cli" +name = "node-custom-rpc" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/additional-rpc-namespace-in-cli/src/main.rs b/examples/node-custom-rpc/src/main.rs similarity index 97% rename from 
examples/additional-rpc-namespace-in-cli/src/main.rs rename to examples/node-custom-rpc/src/main.rs index a4713f931..08b27d3ac 100644 --- a/examples/additional-rpc-namespace-in-cli/src/main.rs +++ b/examples/node-custom-rpc/src/main.rs @@ -3,7 +3,7 @@ //! Run with //! //! ```not_rust -//! cargo run -p additional-rpc-namespace-in-cli -- node --http --ws --enable-ext +//! cargo run -p node-custom-rpc -- node --http --ws --enable-ext //! ``` //! //! This installs an additional RPC method `txpoolExt_transactionCount` that can be queried via [cast](https://github.com/foundry-rs/foundry) diff --git a/examples/cli-extension-event-hooks/Cargo.toml b/examples/node-event-hooks/Cargo.toml similarity index 82% rename from examples/cli-extension-event-hooks/Cargo.toml rename to examples/node-event-hooks/Cargo.toml index 8664057e7..eb36722aa 100644 --- a/examples/cli-extension-event-hooks/Cargo.toml +++ b/examples/node-event-hooks/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "cli-extension-event-hooks" +name = "node-event-hooks" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/cli-extension-event-hooks/src/main.rs b/examples/node-event-hooks/src/main.rs similarity index 95% rename from examples/cli-extension-event-hooks/src/main.rs rename to examples/node-event-hooks/src/main.rs index 9f09d7a3c..b9cd53298 100644 --- a/examples/cli-extension-event-hooks/src/main.rs +++ b/examples/node-event-hooks/src/main.rs @@ -4,7 +4,7 @@ //! Run with //! //! ```not_rust -//! cargo run -p cli-extension-event-hooks -- node +//! cargo run -p node-event-hooks -- node //! ``` //! //! 
This launch the regular reth node and also print: diff --git a/examples/trace-transaction-cli/Cargo.toml b/examples/txpool-tracing/Cargo.toml similarity index 88% rename from examples/trace-transaction-cli/Cargo.toml rename to examples/txpool-tracing/Cargo.toml index 3f681c2de..220e5d8d5 100644 --- a/examples/trace-transaction-cli/Cargo.toml +++ b/examples/txpool-tracing/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "trace-transaction-cli" +name = "txpool-tracing" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/trace-transaction-cli/src/main.rs b/examples/txpool-tracing/src/main.rs similarity index 96% rename from examples/trace-transaction-cli/src/main.rs rename to examples/txpool-tracing/src/main.rs index ab72c2720..85a5b795a 100644 --- a/examples/trace-transaction-cli/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -3,7 +3,7 @@ //! Run with //! //! ```not_rust -//! cargo run --release -p trace-transaction-cli -- node --http --ws --recipients 0x....,0x.... +//! cargo run --release -p txpool-tracing -- node --http --ws --recipients 0x....,0x.... //! ``` //! //! If no recipients are specified, all transactions will be traced. 
From 35ac20b8e4a17658d2110b30ef0049d5841e1c55 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 17:49:43 +0200 Subject: [PATCH 056/250] feat: LaunchContext helpers (#7884) --- crates/node-builder/src/builder/mod.rs | 2 +- crates/node-builder/src/launch/common.rs | 351 ++++++++++++++++++ .../src/{launch.rs => launch/mod.rs} | 260 ++++++------- 3 files changed, 460 insertions(+), 153 deletions(-) create mode 100644 crates/node-builder/src/launch/common.rs rename crates/node-builder/src/{launch.rs => launch/mod.rs} (67%) diff --git a/crates/node-builder/src/builder/mod.rs b/crates/node-builder/src/builder/mod.rs index 44bb60588..9649360eb 100644 --- a/crates/node-builder/src/builder/mod.rs +++ b/crates/node-builder/src/builder/mod.rs @@ -449,7 +449,7 @@ where ) -> eyre::Result, CB::Components>>> { let Self { builder, task_executor, data_dir } = self; - let launcher = DefaultNodeLauncher { task_executor, data_dir }; + let launcher = DefaultNodeLauncher::new(task_executor, data_dir); builder.launch_with(launcher).await } diff --git a/crates/node-builder/src/launch/common.rs b/crates/node-builder/src/launch/common.rs new file mode 100644 index 000000000..765673bf0 --- /dev/null +++ b/crates/node-builder/src/launch/common.rs @@ -0,0 +1,351 @@ +//! Helper types that can be used by launchers. + +use eyre::Context; +use rayon::ThreadPoolBuilder; +use reth_config::PruneConfig; +use reth_db::{database::Database, database_metrics::DatabaseMetrics}; +use reth_node_core::{ + cli::config::RethRpcConfig, + dirs::{ChainPath, DataDirPath}, + node_config::NodeConfig, +}; +use reth_primitives::{Chain, ChainSpec, Head, B256}; +use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_rpc::JwtSecret; +use reth_tasks::TaskExecutor; +use reth_tracing::tracing::{error, info}; +use std::{cmp::max, sync::Arc, thread::available_parallelism}; + +/// Reusable setup for launching a node. 
+/// +/// This provides commonly used boilerplate for launching a node. +#[derive(Debug, Clone)] +pub struct LaunchContext { + /// The task executor for the node. + pub task_executor: TaskExecutor, + /// The data directory for the node. + pub data_dir: ChainPath, +} + +impl LaunchContext { + /// Create a new launch context from the given task executor and data directory. + pub const fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { + Self { task_executor, data_dir } + } + + /// Attaches a database to the launch context. + pub fn with(self, database: DB) -> LaunchContextWith { + LaunchContextWith { inner: self, attachment: database } + } + + /// Loads the reth config with the configured `data_dir` and overrides settings according to the + /// `config`. + /// + /// Attaches both the `NodeConfig` and the loaded `reth.toml` config to the launch context. + pub fn with_loaded_toml_config( + self, + config: NodeConfig, + ) -> eyre::Result> { + let toml_config = self.load_toml_config(&config)?; + Ok(self.with(WithConfigs { config, toml_config })) + } + + /// Loads the reth config with the configured `data_dir` and overrides settings according to the + /// `config`.
+ pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { + let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path()); + + let mut toml_config = confy::load_path::(&config_path) + .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; + + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + + // Update the config with the command line arguments + toml_config.peers.trusted_nodes_only = config.network.trusted_only; + + if !config.network.trusted_peers.is_empty() { + info!(target: "reth::cli", "Adding trusted nodes"); + config.network.trusted_peers.iter().for_each(|peer| { + toml_config.peers.trusted_nodes.insert(*peer); + }); + } + + Ok(toml_config) + } + + /// Configure global settings this includes: + /// + /// - Raising the file descriptor limit + /// - Configuring the global rayon thread pool + pub fn configure_globals(&self) { + // Raise the fd limit of the process. + // Does not do anything on windows. + let _ = fdlimit::raise_fd_limit(); + + // Limit the global rayon thread pool, reserving 2 cores for the rest of the system + let _ = ThreadPoolBuilder::new() + .num_threads( + available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), + ) + .build_global() + .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); + } +} + +/// A [LaunchContext] along with an additional value. +/// +/// This can be used to sequentially attach additional values to the type during the launch process. +/// +/// The type provides common boilerplate for launching a node depending on the additional value. +#[derive(Debug, Clone)] +pub struct LaunchContextWith { + /// The wrapped launch context. + pub inner: LaunchContext, + /// The additional attached value. 
+ pub attachment: T, +} + +impl LaunchContextWith { + /// Configure global settings this includes: + /// + /// - Raising the file descriptor limit + /// - Configuring the global rayon thread pool + pub fn configure_globals(&self) { + self.inner.configure_globals(); + } + + /// Returns the data directory. + pub fn data_dir(&self) -> &ChainPath { + &self.inner.data_dir + } + + /// Returns the task executor. + pub fn task_executor(&self) -> &TaskExecutor { + &self.inner.task_executor + } + + /// Attaches another value to the launch context. + pub fn attach(self, attachment: A) -> LaunchContextWith> { + LaunchContextWith { + inner: self.inner, + attachment: Attached::new(self.attachment, attachment), + } + } +} + +impl LaunchContextWith> { + /// Get a reference to the left value. + pub const fn left(&self) -> &L { + &self.attachment.left + } + + /// Get a reference to the right value. + pub const fn right(&self) -> &R { + &self.attachment.right + } + + /// Get a mutable reference to the left value. + pub fn left_mut(&mut self) -> &mut L { + &mut self.attachment.left + } + + /// Get a mutable reference to the right value. + pub fn right_mut(&mut self) -> &mut R { + &mut self.attachment.right + } +} +impl LaunchContextWith> { + /// Returns the attached [NodeConfig]. + pub const fn node_config(&self) -> &NodeConfig { + &self.left().config + } + + /// Returns the attached [NodeConfig]. + pub fn node_config_mut(&mut self) -> &mut NodeConfig { + &mut self.left_mut().config + } + + /// Returns the attached toml config [reth_config::Config]. + pub const fn toml_config(&self) -> &reth_config::Config { + &self.left().toml_config + } + + /// Returns the attached toml config [reth_config::Config]. + pub fn toml_config_mut(&mut self) -> &mut reth_config::Config { + &mut self.left_mut().toml_config + } + + /// Returns the configured chain spec. + pub fn chain_spec(&self) -> Arc { + self.node_config().chain.clone() + } + + /// Get the hash of the genesis block.
+ pub fn genesis_hash(&self) -> B256 { + self.node_config().chain.genesis_hash() + } + + /// Returns the chain identifier of the node. + pub fn chain_id(&self) -> Chain { + self.node_config().chain.chain + } + + /// Returns true if the node is configured as --dev + pub fn is_dev(&self) -> bool { + self.node_config().dev.dev + } + + /// Returns the configured [PruneConfig] + pub fn prune_config(&self) -> eyre::Result> { + Ok(self.node_config().prune_config()?.or_else(|| self.toml_config().prune.clone())) + } + + /// Returns the initial pipeline target, based on whether or not the node is running in + /// `debug.tip` mode, `debug.continuous` mode, or neither. + /// + /// If running in `debug.tip` mode, the configured tip is returned. + /// Otherwise, if running in `debug.continuous` mode, the genesis hash is returned. + /// Otherwise, `None` is returned. This is what the node will do by default. + pub fn initial_pipeline_target(&self) -> Option { + self.node_config().initial_pipeline_target(self.genesis_hash()) + } + + /// Loads the JWT secret for the engine API + pub fn auth_jwt_secret(&self) -> eyre::Result { + let default_jwt_path = self.data_dir().jwt_path(); + let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?; + Ok(secret) + } +} + +impl LaunchContextWith> +where + DB: Clone, +{ + /// Returns the [ProviderFactory] for the attached database. + pub fn create_provider_factory(&self) -> eyre::Result> { + let factory = ProviderFactory::new( + self.right().clone(), + self.chain_spec(), + self.data_dir().static_files_path(), + )? + .with_static_files_metrics(); + + Ok(factory) + } + + /// Creates a new [ProviderFactory] and attaches it to the launch context. 
+ pub fn with_provider_factory( + self, + ) -> eyre::Result>>> { + let factory = self.create_provider_factory()?; + let ctx = LaunchContextWith { + inner: self.inner, + attachment: self.attachment.map_right(|_| factory), + }; + + Ok(ctx) + } +} + +impl LaunchContextWith>> +where + DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, +{ + /// Returns access to the underlying database. + pub fn database(&self) -> &DB { + self.right().db_ref() + } + + /// Returns the configured ProviderFactory. + pub fn provider_factory(&self) -> &ProviderFactory { + self.right() + } + + /// Returns the static file provider to interact with the static files. + pub fn static_file_provider(&self) -> StaticFileProvider { + self.right().static_file_provider() + } + + /// Starts the prometheus endpoint. + pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { + let prometheus_handle = self.node_config().install_prometheus_recorder()?; + self.node_config() + .start_metrics_endpoint( + prometheus_handle, + self.database().clone(), + self.static_file_provider(), + self.task_executor().clone(), + ) + .await + } + + /// Fetches the head block from the database. + /// + /// If the database is empty, returns the genesis block. + pub fn lookup_head(&self) -> eyre::Result { + self.node_config() + .lookup_head(self.provider_factory().clone()) + .wrap_err("the head block is missing") + } +} + +/// Joins two attachments together. +#[derive(Clone, Copy, Debug)] +pub struct Attached { + left: L, + right: R, +} + +impl Attached { + /// Creates a new `Attached` with the given values. + pub const fn new(left: L, right: R) -> Self { + Self { left, right } + } + + /// Maps the left value to a new value. + pub fn map_left(self, f: F) -> Attached + where + F: FnOnce(L) -> T, + { + Attached::new(f(self.left), self.right) + } + + /// Maps the right value to a new value. 
+ pub fn map_right(self, f: F) -> Attached + where + F: FnOnce(R) -> T, + { + Attached::new(self.left, f(self.right)) + } + + /// Get a reference to the left value. + pub const fn left(&self) -> &L { + &self.left + } + + /// Get a reference to the right value. + pub const fn right(&self) -> &R { + &self.right + } + + /// Get a mutable reference to the right value. + pub fn left_mut(&mut self) -> &mut R { + &mut self.right + } + + /// Get a mutable reference to the right value. + pub fn right_mut(&mut self) -> &mut R { + &mut self.right + } +} + +/// Helper container type to bundle the initial [NodeConfig] and the loaded settings from the +/// reth.toml config +#[derive(Debug, Clone)] +pub struct WithConfigs { + /// The configured, usually derived from the CLI. + pub config: NodeConfig, + /// The loaded reth.toml config. + pub toml_config: reth_config::Config, +} diff --git a/crates/node-builder/src/launch.rs b/crates/node-builder/src/launch/mod.rs similarity index 67% rename from crates/node-builder/src/launch.rs rename to crates/node-builder/src/launch/mod.rs index 645598ada..6181e0c98 100644 --- a/crates/node-builder/src/launch.rs +++ b/crates/node-builder/src/launch/mod.rs @@ -7,9 +7,7 @@ use crate::{ node::FullNode, BuilderContext, NodeBuilderWithComponents, NodeHandle, RethFullAdapter, }; -use eyre::Context; use futures::{future, future::Either, stream, stream_select, StreamExt}; -use rayon::ThreadPoolBuilder; use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, @@ -29,32 +27,35 @@ use reth_interfaces::p2p::either::EitherDownloader; use reth_network::NetworkEvents; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_core::{ - cli::config::RethRpcConfig, dirs::{ChainPath, DataDirPath}, engine_api_store::EngineApiStore, engine_skip_fcu::EngineApiSkipFcu, exit::NodeExitFuture, init::init_genesis, - node_config::NodeConfig, }; use 
reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::format_ether; -use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory}; +use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; use reth_prune::PrunerBuilder; use reth_revm::EvmProcessorFactory; use reth_rpc_engine_api::EngineApi; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{debug, error, info}; +use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; -use std::{cmp::max, future::Future, sync::Arc, thread::available_parallelism}; +use std::{future::Future, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; -/// Launches a new node. +pub mod common; +pub use common::LaunchContext; + +/// A general purpose trait that launches a new node of any kind. /// /// Acts as a node factory. /// /// This is essentially the launch logic for a node. +/// +/// See also [DefaultNodeLauncher] and [NodeBuilderWithComponents::launch_with] pub trait LaunchNode { /// The node type that is created. type Node; @@ -67,37 +68,13 @@ pub trait LaunchNode { #[derive(Debug)] pub struct DefaultNodeLauncher { /// The task executor for the node. - pub task_executor: TaskExecutor, - /// The data directory for the node. - pub data_dir: ChainPath, + pub ctx: LaunchContext, } impl DefaultNodeLauncher { /// Create a new instance of the default node launcher. 
pub fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { - Self { task_executor, data_dir } - } - - /// Loads the reth config with the given datadir root - fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { - let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path()); - - let mut toml_config = confy::load_path::(&config_path) - .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; - - info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); - - // Update the config with the command line arguments - toml_config.peers.trusted_nodes_only = config.network.trusted_only; - - if !config.network.trusted_peers.is_empty() { - info!(target: "reth::cli", "Adding trusted nodes"); - config.network.trusted_peers.iter().for_each(|peer| { - toml_config.peers.trusted_nodes.insert(*peer); - }); - } - - Ok(toml_config) + Self { ctx: LaunchContext::new(task_executor, data_dir) } } } @@ -114,6 +91,7 @@ where self, target: NodeBuilderWithComponents, CB>, ) -> eyre::Result { + let Self { ctx } = self; let NodeBuilderWithComponents { adapter: NodeTypesAdapter { types, database }, components_builder, @@ -121,74 +99,53 @@ where config, } = target; - // get config from file - let reth_config = self.load_toml_config(&config)?; - - let Self { task_executor, data_dir } = self; - - // Raise the fd limit of the process. - // Does not do anything on windows. - fdlimit::raise_fd_limit()?; + // configure globals + ctx.configure_globals(); - // Limit the global rayon thread pool, reserving 2 cores for the rest of the system - let _ = ThreadPoolBuilder::new() - .num_threads( - available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), - ) - .build_global() - .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); + let mut ctx = ctx + // load the toml config + .with_loaded_toml_config(config)? 
+ // attach the database + .attach(database.clone()) + // Create the provider factory + .with_provider_factory()?; - let provider_factory = ProviderFactory::new( - database.clone(), - Arc::clone(&config.chain), - data_dir.static_files_path(), - )? - .with_static_files_metrics(); info!(target: "reth::cli", "Database opened"); - let prometheus_handle = config.install_prometheus_recorder()?; - config - .start_metrics_endpoint( - prometheus_handle, - database.clone(), - provider_factory.static_file_provider(), - task_executor.clone(), - ) - .await?; + ctx.start_prometheus_endpoint().await?; - debug!(target: "reth::cli", chain=%config.chain.chain, -genesis=?config.chain.genesis_hash(), "Initializing genesis"); + debug!(target: "reth::cli", chain=%ctx.chain_id(), genesis=?ctx.genesis_hash(), "Initializing genesis"); - let genesis_hash = init_genesis(provider_factory.clone())?; + init_genesis(ctx.provider_factory().clone())?; - info!(target: "reth::cli", "\n{}", config.chain.display_hardforks()); + info!(target: "reth::cli", "\n{}", ctx.chain_spec().display_hardforks()); // setup the consensus instance - let consensus: Arc = if config.dev.dev { - Arc::new(AutoSealConsensus::new(Arc::clone(&config.chain))) + let consensus: Arc = if ctx.is_dev() { + Arc::new(AutoSealConsensus::new(ctx.chain_spec())) } else { - Arc::new(BeaconConsensus::new(Arc::clone(&config.chain))) + Arc::new(BeaconConsensus::new(ctx.chain_spec())) }; debug!(target: "reth::cli", "Spawning stages metrics listener task"); let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); - task_executor.spawn_critical("stages metrics listener task", sync_metrics_listener); + ctx.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener); - let prune_config = config.prune_config()?.or_else(|| reth_config.prune.clone()); + let prune_config = ctx.prune_config()?; // Configure the blockchain tree for the node 
let evm_config = types.evm_config(); let tree_config = BlockchainTreeConfig::default(); let tree_externals = TreeExternals::new( - provider_factory.clone(), + ctx.provider_factory().clone(), consensus.clone(), - EvmProcessorFactory::new(config.chain.clone(), evm_config.clone()), + EvmProcessorFactory::new(ctx.chain_spec(), evm_config.clone()), ); let tree = BlockchainTree::new( tree_externals, tree_config, - prune_config.as_ref().map(|config| config.segments.clone()), + prune_config.as_ref().map(|prune| prune.segments.clone()), )? .with_sync_metrics_tx(sync_metrics_tx.clone()); @@ -197,40 +154,30 @@ genesis=?config.chain.genesis_hash(), "Initializing genesis"); debug!(target: "reth::cli", "configured blockchain tree"); // fetch the head block from the database - let head = - config.lookup_head(provider_factory.clone()).wrap_err("the head block is missing")?; + let head = ctx.lookup_head()?; // setup the blockchain provider let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; + BlockchainProvider::new(ctx.provider_factory().clone(), blockchain_tree.clone())?; - let ctx = BuilderContext::new( + let builder_ctx = BuilderContext::new( head, - blockchain_db, - task_executor, - data_dir, - config, - reth_config, + blockchain_db.clone(), + ctx.task_executor().clone(), + ctx.data_dir().clone(), + ctx.node_config().clone(), + ctx.toml_config().clone(), evm_config.clone(), ); debug!(target: "reth::cli", "creating components"); - let components = components_builder.build_components(&ctx).await?; - - let BuilderContext { - provider: blockchain_db, - executor, - data_dir, - mut config, - mut reth_config, - .. - } = ctx; + let components = components_builder.build_components(&builder_ctx).await?; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; let node_adapter = NodeAdapter { components, - task_executor: executor.clone(), + task_executor: ctx.task_executor().clone(), provider: blockchain_db.clone(), evm: evm_config.clone(), }; @@ -250,16 +197,16 @@ genesis=?config.chain.genesis_hash(), "Initializing genesis"); let context = ExExContext { head, provider: blockchain_db.clone(), - task_executor: executor.clone(), - data_dir: data_dir.clone(), - config: config.clone(), - reth_config: reth_config.clone(), + task_executor: ctx.task_executor().clone(), + data_dir: ctx.data_dir().clone(), + config: ctx.node_config().clone(), + reth_config: ctx.toml_config().clone(), pool: node_adapter.components.pool().clone(), events, notifications, }; - let executor = executor.clone(); + let executor = ctx.task_executor().clone(); exexs.push(async move { debug!(target: "reth::cli", id, "spawning exex"); let span = reth_tracing::tracing::info_span!("exex", id); @@ -287,21 +234,24 @@ genesis=?config.chain.genesis_hash(), "Initializing genesis"); // todo(onbjerg): rm magic number let exex_manager = ExExManager::new(exex_handles, 1024); let exex_manager_handle = exex_manager.handle(); - executor.spawn_critical("exex manager", async move { + ctx.task_executor().spawn_critical("exex manager", async move { exex_manager.await.expect("exex manager crashed"); }); // send notifications from the blockchain tree to exex manager let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); let mut handle = exex_manager_handle.clone(); - executor.spawn_critical("exex manager blockchain tree notifications", async move { - while let Ok(notification) = canon_state_notifications.recv().await { - handle.send_async(notification.into()).await.expect( - "blockchain tree notification could not be sent to exex + ctx.task_executor().spawn_critical( + "exex manager blockchain tree notifications", + async move { + while let Ok(notification) = canon_state_notifications.recv().await { + 
handle.send_async(notification.into()).await.expect( + "blockchain tree notification could not be sent to exex manager", - ); - } - }); + ); + } + }, + ); info!(target: "reth::cli", "ExEx Manager started"); @@ -314,52 +264,59 @@ manager", let network_client = node_adapter.network().fetch_client().await?; let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); - if let Some(skip_fcu_threshold) = config.debug.skip_fcu { + if let Some(skip_fcu_threshold) = ctx.node_config().debug.skip_fcu { debug!(target: "reth::cli", "spawning skip FCU task"); let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); - executor.spawn_critical( + ctx.task_executor().spawn_critical( "skip FCU interceptor", engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), ); consensus_engine_rx = skip_fcu_rx; } - if let Some(store_path) = config.debug.engine_api_store.clone() { + if let Some(store_path) = ctx.node_config().debug.engine_api_store.clone() { debug!(target: "reth::cli", "spawning engine API store"); let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); let engine_api_store = EngineApiStore::new(store_path); - executor.spawn_critical( + ctx.task_executor().spawn_critical( "engine api interceptor", engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), ); consensus_engine_rx = engine_intercept_rx; }; - let max_block = config.max_block(network_client.clone(), provider_factory.clone()).await?; + let max_block = ctx + .node_config() + .max_block(network_client.clone(), ctx.provider_factory().clone()) + .await?; let mut hooks = EngineHooks::new(); let static_file_producer = StaticFileProducer::new( - provider_factory.clone(), - provider_factory.static_file_provider(), + ctx.provider_factory().clone(), + ctx.static_file_provider(), prune_config.clone().unwrap_or_default().segments, ); let static_file_producer_events = static_file_producer.lock().events(); - 
hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone()))); + hooks.add(StaticFileHook::new( + static_file_producer.clone(), + Box::new(ctx.task_executor().clone()), + )); info!(target: "reth::cli", "StaticFileProducer initialized"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to - if reth_config.stages.etl.dir.is_none() { - reth_config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + if ctx.toml_config_mut().stages.etl.dir.is_none() { + ctx.toml_config_mut().stages.etl.dir = + Some(EtlConfig::from_datadir(&ctx.data_dir().data_dir_path())); } // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (mut pipeline, client) = if config.dev.dev { + let (mut pipeline, client) = if ctx.is_dev() { info!(target: "reth::cli", "Starting Reth in dev mode"); - for (idx, (address, alloc)) in config.chain.genesis.alloc.iter().enumerate() { + for (idx, (address, alloc)) in ctx.chain_spec().genesis.alloc.iter().enumerate() { info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); } @@ -368,9 +325,9 @@ address.to_string(), format_ether(alloc.balance)); let pending_transactions_listener = node_adapter.components.pool().pending_transactions_listener(); - let mining_mode = if let Some(interval) = config.dev.block_time { + let mining_mode = if let Some(interval) = ctx.node_config().dev.block_time { MiningMode::interval(interval) - } else if let Some(max_transactions) = config.dev.block_max_transactions { + } else if let Some(max_transactions) = ctx.node_config().dev.block_max_transactions { MiningMode::instant(max_transactions, pending_transactions_listener) } else { info!(target: "reth::cli", "No mining mode specified, defaulting to @@ -379,7 +336,7 @@ ReadyTransaction"); }; let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( - Arc::clone(&config.chain), + ctx.chain_spec(), blockchain_db.clone(), node_adapter.components.pool().clone(), consensus_engine_tx.clone(), @@ -390,12 +347,12 @@ ReadyTransaction"); .build(); let mut pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, + ctx.node_config(), + &ctx.toml_config().stages, client.clone(), Arc::clone(&consensus), - provider_factory.clone(), - &executor, + ctx.provider_factory().clone(), + ctx.task_executor(), sync_metrics_tx, prune_config.clone(), max_block, @@ -408,17 +365,17 @@ ReadyTransaction"); let pipeline_events = pipeline.events(); task.set_pipeline_events(pipeline_events); debug!(target: "reth::cli", "Spawning auto mine task"); - executor.spawn(Box::pin(task)); + ctx.task_executor().spawn(Box::pin(task)); (pipeline, EitherDownloader::Left(client)) } else { let pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, + ctx.node_config(), + &ctx.toml_config().stages, network_client.clone(), Arc::clone(&consensus), - provider_factory.clone(), - &executor, + ctx.provider_factory().clone(), + ctx.task_executor(), sync_metrics_tx, 
prune_config.clone(), max_block, @@ -433,22 +390,22 @@ ReadyTransaction"); let pipeline_events = pipeline.events(); - let initial_target = config.initial_pipeline_target(genesis_hash); + let initial_target = ctx.initial_pipeline_target(); let prune_config = prune_config.unwrap_or_default(); let mut pruner_builder = PrunerBuilder::new(prune_config.clone()) .max_reorg_depth(tree_config.max_reorg_depth() as usize) - .prune_delete_limit(config.chain.prune_delete_limit) + .prune_delete_limit(ctx.chain_spec().prune_delete_limit) .timeout(PrunerBuilder::DEFAULT_TIMEOUT); if let Some(exex_manager_handle) = &exex_manager_handle { pruner_builder = pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } - let mut pruner = pruner_builder.build(provider_factory.clone()); + let mut pruner = pruner_builder.build(ctx.provider_factory().clone()); let pruner_events = pruner.events(); - hooks.add(PruneHook::new(pruner, Box::new(executor.clone()))); + hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); info!(target: "reth::cli", ?prune_config, "Pruner initialized"); // Configure the consensus engine @@ -456,10 +413,10 @@ ReadyTransaction"); client, pipeline, blockchain_db.clone(), - Box::new(executor.clone()), + Box::new(ctx.task_executor().clone()), Box::new(node_adapter.components.network().clone()), max_block, - config.debug.continuous, + ctx.node_config().debug.continuous, node_adapter.components.payload_builder().clone(), initial_target, reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, @@ -473,7 +430,7 @@ ReadyTransaction"); node_adapter.components.network().event_listener().map(Into::into), beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), - if config.debug.tip.is_none() && !config.dev.dev { + if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) .map(Into::into), @@ -484,7 +441,7 @@ ReadyTransaction"); 
pruner_events.map(Into::into), static_file_producer_events.map(Into::into) ); - executor.spawn_critical( + ctx.task_executor().spawn_critical( "events task", node::handle_events( Some(node_adapter.components.network().clone()), @@ -496,39 +453,38 @@ ReadyTransaction"); let engine_api = EngineApi::new( blockchain_db.clone(), - config.chain.clone(), + ctx.chain_spec(), beacon_engine_handle, node_adapter.components.payload_builder().clone().into(), - Box::new(executor.clone()), + Box::new(ctx.task_executor().clone()), ); info!(target: "reth::cli", "Engine API handler initialized"); // extract the jwt secret from the args if possible - let default_jwt_path = data_dir.jwt_path(); - let jwt_secret = config.rpc.auth_jwt_secret(default_jwt_path)?; + let jwt_secret = ctx.auth_jwt_secret()?; // adjust rpc port numbers based on instance number - config.adjust_instance_ports(); + ctx.node_config_mut().adjust_instance_ports(); // Start RPC servers let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( node_adapter.clone(), engine_api, - &config, + ctx.node_config(), jwt_secret, rpc, ) .await?; // in dev mode we generate 20 random dev-signer accounts - if config.dev.dev { + if ctx.is_dev() { rpc_registry.eth_api().with_dev_accounts(); } // Run consensus engine to completion let (tx, rx) = oneshot::channel(); info!(target: "reth::cli", "Starting consensus engine"); - executor.spawn_critical_blocking("consensus engine", async move { + ctx.task_executor().spawn_critical_blocking("consensus engine", async move { let res = beacon_consensus_engine.await; let _ = tx.send(res); }); @@ -539,11 +495,11 @@ ReadyTransaction"); network: node_adapter.components.network().clone(), provider: node_adapter.provider.clone(), payload_builder: node_adapter.components.payload_builder().clone(), - task_executor: executor, + task_executor: ctx.task_executor().clone(), rpc_server_handles, rpc_registry, - config, - data_dir, + config: ctx.node_config().clone(), + data_dir: 
ctx.data_dir().clone(), }; // Notify on node started on_node_started.on_event(full_node.clone())?; From 9567b256c8cb4a8cd439bd7e6e7b1be3b032efde Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Thu, 25 Apr 2024 23:51:31 +0800 Subject: [PATCH 057/250] feat: support max_request_body_size (#7880) --- crates/rpc/ipc/src/server/ipc.rs | 22 ++++++--- crates/rpc/ipc/src/server/mod.rs | 80 +++++++++++++++++++++++++++++++- 2 files changed, 93 insertions(+), 9 deletions(-) diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 1fd600c03..daf7d1dc0 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -11,7 +11,10 @@ use jsonrpsee::{ JsonRawValue, }, server::middleware::rpc::RpcServiceT, - types::{error::ErrorCode, ErrorObject, Id, InvalidRequest, Notification, Request}, + types::{ + error::{reject_too_big_request, ErrorCode}, + ErrorObject, Id, InvalidRequest, Notification, Request, + }, BatchResponseBuilder, MethodResponse, ResponsePayload, }; use tokio::sync::OwnedSemaphorePermit; @@ -124,6 +127,7 @@ pub(crate) async fn call_with_service( request: String, rpc_service: S, max_response_body_size: usize, + max_request_body_size: usize, conn: Arc, ) -> Option where @@ -143,9 +147,17 @@ where }) .unwrap_or(Kind::Single); + let data = request.into_bytes(); + if data.len() > max_request_body_size { + return Some(batch_response_error( + Id::Null, + reject_too_big_request(max_request_body_size as u32), + )); + } + // Single request or notification let res = if matches!(request_kind, Kind::Single) { - let response = process_single_request(request.into_bytes(), &rpc_service).await; + let response = process_single_request(data, &rpc_service).await; match response { Some(response) if response.is_method_call() => Some(response.to_result()), _ => { @@ -155,11 +167,7 @@ where } } } else { - process_batch_request( - Batch { data: request.into_bytes(), rpc_service }, - max_response_body_size, - ) - .await + 
process_batch_request(Batch { data, rpc_service }, max_response_body_size).await }; drop(conn); diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 7afb6bb7d..5301c7d21 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -378,6 +378,7 @@ where }; let max_response_body_size = self.inner.max_response_body_size as usize; + let max_request_body_size = self.inner.max_request_body_size as usize; let rpc_service = self.rpc_middleware.service(RpcService::new( self.inner.methods.clone(), max_response_body_size, @@ -392,7 +393,14 @@ where // work to a separate task takes the pressure off the connection so all concurrent responses // are also serialized concurrently and the connection can focus on read+write let f = tokio::task::spawn(async move { - ipc::call_with_service(request, rpc_service, max_response_body_size, conn).await + ipc::call_with_service( + request, + rpc_service, + max_response_body_size, + max_request_body_size, + conn, + ) + .await }); Box::pin(async move { f.await.map_err(|err| err.into()) }) @@ -780,7 +788,11 @@ mod tests { use crate::client::IpcClientBuilder; use futures::future::{select, Either}; use jsonrpsee::{ - core::client::{ClientT, Subscription, SubscriptionClientT}, + core::{ + client, + client::{ClientT, Error, Subscription, SubscriptionClientT}, + params::BatchRequestBuilder, + }, rpc_params, types::Request, PendingSubscriptionSink, RpcModule, SubscriptionMessage, @@ -834,6 +846,46 @@ mod tests { } } + #[tokio::test] + async fn can_set_the_max_response_body_size() { + let endpoint = dummy_endpoint(); + let server = Builder::default().max_response_body_size(100).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "a".repeat(101)).unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let response: Result = 
client.request("anything", rpc_params![]).await; + assert!(response.unwrap_err().to_string().contains("Exceeded max limit of")); + } + + #[tokio::test] + async fn can_set_the_max_request_body_size() { + let endpoint = dummy_endpoint(); + let server = Builder::default().max_request_body_size(100).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "succeed").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let response: Result = + client.request("anything", rpc_params!["a".repeat(101)]).await; + assert!(response.is_err()); + let mut batch_request_builder = BatchRequestBuilder::new(); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + // the raw request string is: + // [{"jsonrpc":"2.0","id":0,"method":"anything"},{"jsonrpc":"2.0","id":1, \ + // "method":"anything"},{"jsonrpc":"2.0","id":2,"method":"anything"}]" + // which is 136 bytes, more than 100 bytes. 
+ let response: Result, Error> = + client.batch_request(batch_request_builder).await; + assert!(response.is_err()); + } + #[tokio::test] async fn test_rpc_request() { let endpoint = dummy_endpoint(); @@ -849,6 +901,30 @@ mod tests { assert_eq!(response, msg); } + #[tokio::test] + async fn test_batch_request() { + let endpoint = dummy_endpoint(); + let server = Builder::default().build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "ok").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let mut batch_request_builder = BatchRequestBuilder::new(); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let result = client + .batch_request(batch_request_builder) + .await + .unwrap() + .into_ok() + .unwrap() + .collect::>(); + assert_eq!(result, vec!["ok", "ok", "ok"]); + } + #[tokio::test] async fn test_ipc_modules() { reth_tracing::init_test_tracing(); From 6d1aab53806903c3707d9843ba1a433c1cf61f5d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 25 Apr 2024 18:44:02 +0200 Subject: [PATCH 058/250] refactor: minor `Signature` refactors (#7888) --- crates/primitives/src/transaction/signature.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 84ae2915f..8cd57dc7f 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -33,7 +33,7 @@ impl Signature { /// signature. 
#[cfg(feature = "optimism")] pub const fn optimism_deposit_tx_signature() -> Self { - Signature { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } + Self { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } } } @@ -52,7 +52,7 @@ impl Compact for Signature { let r = U256::from_le_slice(&buf[0..32]); let s = U256::from_le_slice(&buf[32..64]); buf.advance(64); - (Signature { r, s, odd_y_parity: identifier != 0 }, buf) + (Self { r, s, odd_y_parity: identifier != 0 }, buf) } } @@ -112,17 +112,17 @@ impl Signature { // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock if v == 0 && r.is_zero() && s.is_zero() { - return Ok((Signature { r, s, odd_y_parity: false }, None)) + return Ok((Self { r, s, odd_y_parity: false }, None)) } return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) } let odd_y_parity = v == 28; - Ok((Signature { r, s, odd_y_parity }, None)) + Ok((Self { r, s, odd_y_parity }, None)) } else { // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 let odd_y_parity = ((v - 35) % 2) != 0; let chain_id = (v - 35) >> 1; - Ok((Signature { r, s, odd_y_parity }, Some(chain_id))) + Ok((Self { r, s, odd_y_parity }, Some(chain_id))) } } @@ -140,7 +140,7 @@ impl Signature { /// Decodes the `odd_y_parity`, `r`, `s` values without a RLP header. 
pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Signature { + Ok(Self { odd_y_parity: Decodable::decode(buf)?, r: Decodable::decode(buf)?, s: Decodable::decode(buf)?, From 57e3f40dda4411074264bbcd1d84d4a4f7799ca3 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 25 Apr 2024 18:45:11 +0200 Subject: [PATCH 059/250] chore: unpin cc (#7891) --- Cargo.lock | 5 +++-- crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 343697508..256d5e4b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1416,12 +1416,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] diff --git a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml index cebae37b3..fbdad4c51 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml +++ b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml @@ -15,6 +15,5 @@ name = "reth_mdbx_sys" libc = "0.2" [build-dependencies] -## temp pin -cc = "=1.0.83" +cc = "1.0" bindgen = { version = "0.69", default-features = false, features = ["runtime"] } From d312dbbea4ed10127aecc3507db69b381acc416e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 25 Apr 2024 19:32:12 +0200 Subject: [PATCH 060/250] fix: chain spec for op mainnet (#7883) --- crates/ethereum-forks/src/hardfork.rs | 3 +++ crates/primitives/src/chain/spec.rs | 16 ++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/ethereum-forks/src/hardfork.rs b/crates/ethereum-forks/src/hardfork.rs index 6ccb30697..41d1f1302 
100644 --- a/crates/ethereum-forks/src/hardfork.rs +++ b/crates/ethereum-forks/src/hardfork.rs @@ -73,6 +73,9 @@ pub enum Hardfork { // Upcoming /// Prague: Prague, + /// Fjord: + #[cfg(feature = "optimism")] + Fjord, } impl Hardfork { diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index fb657b5f9..a1ae18ad0 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -270,10 +270,10 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1707238800) - .ecotone(1707238800), + .shanghai(1704992401) + .canyon(1704992401) + .cancun(1710374401) + .ecotone(1710374401), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -286,12 +286,12 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { (Hardfork::Istanbul, ForkCondition::Block(0)), (Hardfork::MuirGlacier, ForkCondition::Block(0)), (Hardfork::Berlin, ForkCondition::Block(3950000)), - (Hardfork::London, ForkCondition::Block(3950000)), - (Hardfork::ArrowGlacier, ForkCondition::Block(3950000)), - (Hardfork::GrayGlacier, ForkCondition::Block(3950000)), + (Hardfork::London, ForkCondition::Block(105235063)), + (Hardfork::ArrowGlacier, ForkCondition::Block(105235063)), + (Hardfork::GrayGlacier, ForkCondition::Block(105235063)), ( Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(3950000), total_difficulty: U256::from(0) }, + ForkCondition::TTD { fork_block: Some(105235063), total_difficulty: U256::from(0) }, ), (Hardfork::Bedrock, ForkCondition::Block(105235063)), (Hardfork::Regolith, ForkCondition::Timestamp(0)), From 663a7185e6c391109466f62a6fc68205121676a6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 20:18:46 +0200 Subject: [PATCH 061/250] chore: more LaunchContext helpers (#7894) 
--- crates/consensus/auto-seal/src/mode.rs | 11 +++ crates/node-builder/src/launch/common.rs | 95 +++++++++++++++++++++-- crates/node-builder/src/launch/mod.rs | 75 +++++------------- crates/node-core/src/args/pruning_args.rs | 46 ++++++----- crates/node-core/src/node_config.rs | 4 +- 5 files changed, 143 insertions(+), 88 deletions(-) diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs index 809455311..b124010e6 100644 --- a/crates/consensus/auto-seal/src/mode.rs +++ b/crates/consensus/auto-seal/src/mode.rs @@ -62,6 +62,17 @@ impl MiningMode { } } +impl fmt::Display for MiningMode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let kind = match self { + MiningMode::None => "None", + MiningMode::Auto(_) => "Auto", + MiningMode::FixedBlockTime(_) => "FixedBlockTime", + }; + write!(f, "{kind}") + } +} + /// A miner that's supposed to create a new block every `interval`, mining all transactions that are /// ready at that time. /// diff --git a/crates/node-builder/src/launch/common.rs b/crates/node-builder/src/launch/common.rs index 765673bf0..f4d2a931c 100644 --- a/crates/node-builder/src/launch/common.rs +++ b/crates/node-builder/src/launch/common.rs @@ -1,20 +1,28 @@ //! Helper types that can be used by launchers. 
+use std::{cmp::max, sync::Arc, thread::available_parallelism}; + use eyre::Context; use rayon::ThreadPoolBuilder; -use reth_config::PruneConfig; +use tokio::sync::mpsc::Receiver; + +use reth_auto_seal_consensus::MiningMode; +use reth_config::{config::EtlConfig, PruneConfig}; use reth_db::{database::Database, database_metrics::DatabaseMetrics}; +use reth_interfaces::p2p::headers::client::HeadersClient; use reth_node_core::{ cli::config::RethRpcConfig, dirs::{ChainPath, DataDirPath}, + init::{init_genesis, InitDatabaseError}, node_config::NodeConfig, }; -use reth_primitives::{Chain, ChainSpec, Head, B256}; +use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, PruneModes, B256}; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_prune::PrunerBuilder; use reth_rpc::JwtSecret; +use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{error, info}; -use std::{cmp::max, sync::Arc, thread::available_parallelism}; /// Reusable setup for launching a node. 
/// @@ -73,6 +81,12 @@ impl LaunchContext { Ok(toml_config) } + /// Convenience function to [Self::configure_globals] + pub fn with_configured_globals(self) -> Self { + self.configure_globals(); + self + } + /// Configure global settings this includes: /// /// - Raising the file descriptor limit @@ -155,6 +169,31 @@ impl LaunchContextWith> { } } impl LaunchContextWith> { + /// Adjust certain settings in the config to make sure they are set correctly + /// + /// This includes: + /// - Making sure the ETL dir is set to the datadir + /// - RPC settings are adjusted to the correct port + pub fn with_adjusted_configs(self) -> Self { + self.ensure_etl_datadir().with_adjusted_rpc_instance_ports() + } + + /// Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + pub fn ensure_etl_datadir(mut self) -> Self { + if self.toml_config_mut().stages.etl.dir.is_none() { + self.toml_config_mut().stages.etl.dir = + Some(EtlConfig::from_datadir(&self.data_dir().data_dir_path())) + } + + self + } + + /// Change rpc port numbers based on the instance number. + pub fn with_adjusted_rpc_instance_ports(mut self) -> Self { + self.node_config_mut().adjust_instance_ports(); + self + } + /// Returns the attached [NodeConfig]. 
pub const fn node_config(&self) -> &NodeConfig { &self.left().config @@ -196,8 +235,20 @@ impl LaunchContextWith> { } /// Returns the configured [PruneConfig] - pub fn prune_config(&self) -> eyre::Result> { - Ok(self.node_config().prune_config()?.or_else(|| self.toml_config().prune.clone())) + pub fn prune_config(&self) -> Option { + self.node_config().prune_config().or_else(|| self.toml_config().prune.clone()) + } + + /// Returns the configured [PruneModes] + pub fn prune_modes(&self) -> Option { + self.prune_config().map(|config| config.segments) + } + + /// Returns an initialized [PrunerBuilder] based on the configured [PruneConfig] + pub fn pruner_builder(&self) -> PrunerBuilder { + PrunerBuilder::new(self.prune_config().unwrap_or_default()) + .prune_delete_limit(self.chain_spec().prune_delete_limit) + .timeout(PrunerBuilder::DEFAULT_TIMEOUT) } /// Returns the initial pipeline target, based on whether or not the node is running in @@ -216,6 +267,17 @@ impl LaunchContextWith> { let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?; Ok(secret) } + + /// Returns the [MiningMode] intended for --dev mode. + pub fn dev_mining_mode(&self, pending_transactions_listener: Receiver) -> MiningMode { + if let Some(interval) = self.node_config().dev.block_time { + MiningMode::interval(interval) + } else if let Some(max_transactions) = self.node_config().dev.block_max_transactions { + MiningMode::instant(max_transactions, pending_transactions_listener) + } else { + MiningMode::instant(1, pending_transactions_listener) + } + } } impl LaunchContextWith> @@ -267,6 +329,29 @@ where self.right().static_file_provider() } + /// Creates a new [StaticFileProducer] with the attached database. 
+ pub fn static_file_producer(&self) -> StaticFileProducer { + StaticFileProducer::new( + self.provider_factory().clone(), + self.static_file_provider(), + self.prune_modes().unwrap_or_default(), + ) + } + + /// Write the genesis block and state if it has not already been written + pub fn init_genesis(&self) -> Result { + init_genesis(self.provider_factory().clone()) + } + + /// Returns the max block that the node should run to, looking it up from the network if + /// necessary + pub async fn max_block(&self, client: C) -> eyre::Result> + where + C: HeadersClient, + { + self.node_config().max_block(client, self.provider_factory().clone()).await + } + /// Starts the prometheus endpoint. pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { let prometheus_handle = self.node_config().install_prometheus_recorder()?; diff --git a/crates/node-builder/src/launch/mod.rs b/crates/node-builder/src/launch/mod.rs index 6181e0c98..00304816c 100644 --- a/crates/node-builder/src/launch/mod.rs +++ b/crates/node-builder/src/launch/mod.rs @@ -8,7 +8,7 @@ use crate::{ BuilderContext, NodeBuilderWithComponents, NodeHandle, RethFullAdapter, }; use futures::{future, future::Either, stream, stream_select, StreamExt}; -use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; +use reth_auto_seal_consensus::AutoSealConsensus; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensus, BeaconConsensusEngine, @@ -16,7 +16,6 @@ use reth_beacon_consensus::{ use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; -use reth_config::config::EtlConfig; use reth_consensus::Consensus; use reth_db::{ database::Database, @@ -31,15 +30,12 @@ use reth_node_core::{ engine_api_store::EngineApiStore, engine_skip_fcu::EngineApiSkipFcu, exit::NodeExitFuture, - init::init_genesis, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::format_ether; use 
reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; -use reth_prune::PrunerBuilder; use reth_revm::EvmProcessorFactory; use reth_rpc_engine_api::EngineApi; -use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; @@ -99,14 +95,14 @@ where config, } = target; - // configure globals - ctx.configure_globals(); - - let mut ctx = ctx + let ctx = ctx + .with_configured_globals() // load the toml config .with_loaded_toml_config(config)? // attach the database .attach(database.clone()) + // ensure certain settings take effect + .with_adjusted_configs() // Create the provider factory .with_provider_factory()?; @@ -115,8 +111,7 @@ where ctx.start_prometheus_endpoint().await?; debug!(target: "reth::cli", chain=%ctx.chain_id(), genesis=?ctx.genesis_hash(), "Initializing genesis"); - - init_genesis(ctx.provider_factory().clone())?; + ctx.init_genesis()?; info!(target: "reth::cli", "\n{}", ctx.chain_spec().display_hardforks()); @@ -132,8 +127,6 @@ where let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); ctx.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener); - let prune_config = ctx.prune_config()?; - // Configure the blockchain tree for the node let evm_config = types.evm_config(); let tree_config = BlockchainTreeConfig::default(); @@ -142,12 +135,8 @@ where consensus.clone(), EvmProcessorFactory::new(ctx.chain_spec(), evm_config.clone()), ); - let tree = BlockchainTree::new( - tree_externals, - tree_config, - prune_config.as_ref().map(|prune| prune.segments.clone()), - )? - .with_sync_metrics_tx(sync_metrics_tx.clone()); + let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? 
+ .with_sync_metrics_tx(sync_metrics_tx.clone()); let canon_state_notification_sender = tree.canon_state_notification_sender(); let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); @@ -286,17 +275,10 @@ manager", consensus_engine_rx = engine_intercept_rx; }; - let max_block = ctx - .node_config() - .max_block(network_client.clone(), ctx.provider_factory().clone()) - .await?; + let max_block = ctx.max_block(network_client.clone()).await?; let mut hooks = EngineHooks::new(); - let static_file_producer = StaticFileProducer::new( - ctx.provider_factory().clone(), - ctx.static_file_provider(), - prune_config.clone().unwrap_or_default().segments, - ); + let static_file_producer = ctx.static_file_producer(); let static_file_producer_events = static_file_producer.lock().events(); hooks.add(StaticFileHook::new( static_file_producer.clone(), @@ -304,12 +286,6 @@ manager", )); info!(target: "reth::cli", "StaticFileProducer initialized"); - // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to - if ctx.toml_config_mut().stages.etl.dir.is_none() { - ctx.toml_config_mut().stages.etl.dir = - Some(EtlConfig::from_datadir(&ctx.data_dir().data_dir_path())); - } - // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); @@ -322,18 +298,9 @@ address.to_string(), format_ether(alloc.balance)); } // install auto-seal - let pending_transactions_listener = - node_adapter.components.pool().pending_transactions_listener(); - - let mining_mode = if let Some(interval) = ctx.node_config().dev.block_time { - MiningMode::interval(interval) - } else if let Some(max_transactions) = ctx.node_config().dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) - } else { - info!(target: "reth::cli", "No mining mode specified, defaulting to -ReadyTransaction"); - MiningMode::instant(1, pending_transactions_listener) - }; + let mining_mode = + 
ctx.dev_mining_mode(node_adapter.components.pool().pending_transactions_listener()); + info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode"); let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( ctx.chain_spec(), @@ -354,7 +321,7 @@ ReadyTransaction"); ctx.provider_factory().clone(), ctx.task_executor(), sync_metrics_tx, - prune_config.clone(), + ctx.prune_config(), max_block, static_file_producer, evm_config, @@ -377,7 +344,7 @@ ReadyTransaction"); ctx.provider_factory().clone(), ctx.task_executor(), sync_metrics_tx, - prune_config.clone(), + ctx.prune_config(), max_block, static_file_producer, evm_config, @@ -392,11 +359,8 @@ ReadyTransaction"); let initial_target = ctx.initial_pipeline_target(); - let prune_config = prune_config.unwrap_or_default(); - let mut pruner_builder = PrunerBuilder::new(prune_config.clone()) - .max_reorg_depth(tree_config.max_reorg_depth() as usize) - .prune_delete_limit(ctx.chain_spec().prune_delete_limit) - .timeout(PrunerBuilder::DEFAULT_TIMEOUT); + let mut pruner_builder = + ctx.pruner_builder().max_reorg_depth(tree_config.max_reorg_depth() as usize); if let Some(exex_manager_handle) = &exex_manager_handle { pruner_builder = pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); @@ -405,8 +369,8 @@ ReadyTransaction"); let mut pruner = pruner_builder.build(ctx.provider_factory().clone()); let pruner_events = pruner.events(); + info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); - info!(target: "reth::cli", ?prune_config, "Pruner initialized"); // Configure the consensus engine let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( @@ -463,9 +427,6 @@ ReadyTransaction"); // extract the jwt secret from the args if possible let jwt_secret = ctx.auth_jwt_secret()?; - // adjust rpc port numbers based on instance 
number - ctx.node_config_mut().adjust_instance_ports(); - // Start RPC servers let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( node_adapter.clone(), diff --git a/crates/node-core/src/args/pruning_args.rs b/crates/node-core/src/args/pruning_args.rs index 52605338e..4adc72158 100644 --- a/crates/node-core/src/args/pruning_args.rs +++ b/crates/node-core/src/args/pruning_args.rs @@ -5,7 +5,6 @@ use reth_config::config::PruneConfig; use reth_primitives::{ ChainSpec, PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; -use std::sync::Arc; /// Parameters for pruning and full node #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] @@ -19,31 +18,30 @@ pub struct PruningArgs { impl PruningArgs { /// Returns pruning configuration. - pub fn prune_config(&self, chain_spec: Arc) -> eyre::Result> { - Ok(if self.full { - Some(PruneConfig { - block_interval: 5, - segments: PruneModes { - sender_recovery: Some(PruneMode::Full), - transaction_lookup: None, - receipts: chain_spec + pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option { + if !self.full { + return None; + } + Some(PruneConfig { + block_interval: 5, + segments: PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: None, + receipts: chain_spec + .deposit_contract + .as_ref() + .map(|contract| PruneMode::Before(contract.block)), + account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + receipts_log_filter: ReceiptsLogPruneConfig( + chain_spec .deposit_contract .as_ref() - .map(|contract| PruneMode::Before(contract.block)), - account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - receipts_log_filter: ReceiptsLogPruneConfig( - chain_spec - .deposit_contract - .as_ref() - .map(|contract| (contract.address, PruneMode::Before(contract.block))) - .into_iter() - 
.collect(), - ), - }, - }) - } else { - None + .map(|contract| (contract.address, PruneMode::Before(contract.block))) + .into_iter() + .collect(), + ), + }, }) } } diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 608f12cad..c25395e07 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -262,8 +262,8 @@ impl NodeConfig { } /// Returns pruning configuration. - pub fn prune_config(&self) -> eyre::Result> { - self.pruning.prune_config(Arc::clone(&self.chain)) + pub fn prune_config(&self) -> Option { + self.pruning.prune_config(&self.chain) } /// Returns the max block that the node should run to, looking it up from the network if From 16ae640615d603514e22565bc217345871569996 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 20:29:06 +0200 Subject: [PATCH 062/250] chore: decrease local pending block expiration time (#7896) --- crates/rpc/rpc/src/eth/api/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index c23dfe1ac..6c936808e 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -355,7 +355,7 @@ where let now = Instant::now(); *lock = Some(PendingBlock { block: pending_block.clone(), - expires_at: now + Duration::from_secs(3), + expires_at: now + Duration::from_secs(1), }); Ok(Some(pending_block)) From 062b3d76b94ce54ef56c2c421a3e11340365e20f Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Thu, 25 Apr 2024 20:36:09 +0200 Subject: [PATCH 063/250] refactor: move network-txpool.rs example to its own folder (#7892) Co-authored-by: Oliver Nordbjerg --- Cargo.lock | 11 +++++++++++ Cargo.toml | 1 + examples/Cargo.toml | 7 +------ examples/README.md | 6 +++--- examples/network-txpool/Cargo.toml | 13 +++++++++++++ .../src/main.rs} | 2 +- 6 files changed, 30 insertions(+), 10 deletions(-) 
create mode 100644 examples/network-txpool/Cargo.toml rename examples/{network-txpool.rs => network-txpool/src/main.rs} (98%) diff --git a/Cargo.lock b/Cargo.lock index 256d5e4b6..0df1d9fd3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4864,6 +4864,17 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "network-txpool" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-network", + "reth-provider", + "reth-transaction-pool", + "tokio", +] + [[package]] name = "nibble_vec" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 7b9b63e95..04e26fc44 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,6 +80,7 @@ members = [ "examples/custom-dev-node/", "examples/custom-payload-builder/", "examples/manual-p2p/", + "examples/network-txpool/", "examples/rpc-db/", "examples/txpool-tracing/", "examples/polygon-p2p/", diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 82b6be45a..2379e9a0f 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -31,9 +31,4 @@ path = "db-access.rs" [[example]] name = "network" -path = "network.rs" - -[[example]] -name = "network-txpool" -path = "network-txpool.rs" - +path = "network.rs" \ No newline at end of file diff --git a/examples/README.md b/examples/README.md index db0bdb999..dcec15d35 100644 --- a/examples/README.md +++ b/examples/README.md @@ -48,10 +48,10 @@ to make a PR! 
## Mempool -| Example | Description | -| ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | +| Example | Description | +|------------------------------------------------------| -------------------------------------------------------------------------------------------------------------------------- | | [Trace pending transactions](./txpool-tracing) | Illustrates how to trace pending transactions as they arrive in the mempool | -| [Standalone txpool](./network-txpool.rs) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | +| [Standalone txpool](./network-txpool) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | ## P2P diff --git a/examples/network-txpool/Cargo.toml b/examples/network-txpool/Cargo.toml new file mode 100644 index 000000000..12544a8f3 --- /dev/null +++ b/examples/network-txpool/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "network-txpool" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth-provider = { workspace = true, features = ["test-utils"] } +eyre.workspace = true +tokio.workspace = true +reth-network.workspace = true +reth-transaction-pool.workspace = true diff --git a/examples/network-txpool.rs b/examples/network-txpool/src/main.rs similarity index 98% rename from examples/network-txpool.rs rename to examples/network-txpool/src/main.rs index 0af120a89..6f8d69eab 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool/src/main.rs @@ -4,7 +4,7 @@ //! Run with //! //! ```not_rust -//! cargo run --example network-txpool +//! cargo run --release -p network-txpool -- node //! 
``` use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; From 844bcb86b708fc6bfa369aa2702766232c9e219b Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Thu, 25 Apr 2024 20:48:23 +0200 Subject: [PATCH 064/250] refactor: replace rpc TransactionKind with alloy_primitives::TxKind (#7885) Co-authored-by: Oliver Nordbjerg --- .../rpc-types-compat/src/transaction/typed.rs | 18 +--- .../rpc-types/src/eth/transaction/typed.rs | 90 ++----------------- crates/rpc/rpc-types/src/lib.rs | 2 +- crates/rpc/rpc/src/eth/api/transactions.rs | 5 +- 4 files changed, 13 insertions(+), 102 deletions(-) diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index 03f502a20..b119a0956 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -16,7 +16,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, }), @@ -25,7 +25,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, access_list: tx.access_list, @@ -35,7 +35,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, max_fee_per_gas: tx.max_fee_per_gas.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, access_list: tx.access_list, @@ -47,7 +47,7 @@ pub fn to_primitive_transaction( gas_limit: tx.gas_limit.to(), max_fee_per_gas: tx.max_fee_per_gas.to(), max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, access_list: 
tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, @@ -56,13 +56,3 @@ pub fn to_primitive_transaction( }), }) } - -/// Transforms a [reth_rpc_types::TransactionKind] into a [reth_primitives::TxKind] -pub fn to_primitive_transaction_kind( - kind: reth_rpc_types::TransactionKind, -) -> reth_primitives::TxKind { - match kind { - reth_rpc_types::TransactionKind::Call(to) => reth_primitives::TxKind::Call(to), - reth_rpc_types::TransactionKind::Create => reth_primitives::TxKind::Create, - } -} diff --git a/crates/rpc/rpc-types/src/eth/transaction/typed.rs b/crates/rpc/rpc-types/src/eth/transaction/typed.rs index bf995c353..6526bc2b6 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/typed.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/typed.rs @@ -2,10 +2,8 @@ //! transaction deserialized from the json input of an RPC call. Depending on what fields are set, //! it can be converted into the container type [`TypedTransactionRequest`]. -use alloy_primitives::{Address, Bytes, B256, U256}; -use alloy_rlp::{Buf, BufMut, Decodable, Encodable, Error as RlpError, EMPTY_STRING_CODE}; +use alloy_primitives::{Bytes, TxKind, B256, U256}; use alloy_rpc_types::{AccessList, BlobTransactionSidecar}; -use serde::{Deserialize, Serialize}; /// Container type for various Ethereum transaction requests /// @@ -36,7 +34,7 @@ pub struct LegacyTransactionRequest { /// The gas limit for the transaction pub gas_limit: U256, /// The kind of transaction (e.g., Call, Create) - pub kind: TransactionKind, + pub kind: TxKind, /// The value of the transaction pub value: U256, /// The input data for the transaction @@ -57,7 +55,7 @@ pub struct EIP2930TransactionRequest { /// The gas limit for the transaction pub gas_limit: U256, /// The kind of transaction (e.g., Call, Create) - pub kind: TransactionKind, + pub kind: TxKind, /// The value of the transaction pub value: U256, /// The input data for the transaction @@ -80,7 +78,7 @@ pub struct EIP1559TransactionRequest { /// The gas limit 
for the transaction pub gas_limit: U256, /// The kind of transaction (e.g., Call, Create) - pub kind: TransactionKind, + pub kind: TxKind, /// The value of the transaction pub value: U256, /// The input data for the transaction @@ -103,7 +101,7 @@ pub struct EIP4844TransactionRequest { /// The gas limit for the transaction pub gas_limit: U256, /// The kind of transaction (e.g., Call, Create) - pub kind: TransactionKind, + pub kind: TxKind, /// The value of the transaction pub value: U256, /// The input data for the transaction @@ -117,81 +115,3 @@ pub struct EIP4844TransactionRequest { /// Sidecar information for the transaction pub sidecar: BlobTransactionSidecar, } - -/// Represents the `to` field of a transaction request -/// -/// This determines what kind of transaction this is -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum TransactionKind { - /// Transaction will call this address or transfer funds to this address - Call(Address), - /// No `to` field set, this transaction will create a contract - Create, -} - -// == impl TransactionKind == - -impl TransactionKind { - /// If this transaction is a call this returns the address of the callee - pub fn as_call(&self) -> Option<&Address> { - match self { - TransactionKind::Call(to) => Some(to), - TransactionKind::Create => None, - } - } -} - -impl Encodable for TransactionKind { - /// This encodes the `to` field of a transaction request. 
- /// If the [TransactionKind] is a [TransactionKind::Call] it will encode the inner address: - /// `rlp(address)` - /// - /// If the [TransactionKind] is a [TransactionKind::Create] it will encode an empty list: - /// `rlp([])`, which is also - fn encode(&self, out: &mut dyn BufMut) { - match self { - TransactionKind::Call(to) => to.encode(out), - TransactionKind::Create => [].encode(out), - } - } - fn length(&self) -> usize { - match self { - TransactionKind::Call(to) => to.length(), - TransactionKind::Create => [].length(), - } - } -} - -impl Decodable for TransactionKind { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - if let Some(&first) = buf.first() { - if first == EMPTY_STRING_CODE { - buf.advance(1); - Ok(TransactionKind::Create) - } else { - let addr =
::decode(buf)?; - Ok(TransactionKind::Call(addr)) - } - } else { - Err(RlpError::InputTooShort) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn raw_kind_encoding_sanity() { - // check the 0x80 encoding for Create - let mut buf = Vec::new(); - TransactionKind::Create.encode(&mut buf); - assert_eq!(buf, vec![0x80]); - - // check decoding - let buf = [0x80]; - let decoded = TransactionKind::decode(&mut &buf[..]).unwrap(); - assert_eq!(decoded, TransactionKind::Create); - } -} diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 68ad11c6e..0adcab0f3 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -38,7 +38,7 @@ pub use eth::{ ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, }, error::ToRpcError, - transaction::{self, TransactionKind, TransactionRequest, TypedTransactionRequest}, + transaction::{self, TransactionRequest, TypedTransactionRequest}, }; pub use mev::*; diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 2188b8d25..1ca8ed119 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -8,6 +8,7 @@ use crate::{ }, EthApi, EthApiSpec, }; +use alloy_primitives::TxKind as RpcTransactionKind; use async_trait::async_trait; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; @@ -33,8 +34,8 @@ use reth_rpc_types::{ LegacyTransactionRequest, }, AnyReceiptEnvelope, AnyTransactionReceipt, Index, Log, ReceiptWithBloom, Transaction, - TransactionInfo, TransactionKind as RpcTransactionKind, TransactionReceipt, TransactionRequest, - TypedTransactionRequest, WithOtherFields, + TransactionInfo, TransactionReceipt, TransactionRequest, TypedTransactionRequest, + WithOtherFields, }; use reth_rpc_types_compat::transaction::from_recovered_with_block_context; use reth_transaction_pool::{TransactionOrigin, TransactionPool}; From 
3ad3bbc593782280c4d34ed41388373b77f7b3bd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 25 Apr 2024 21:23:21 +0200 Subject: [PATCH 065/250] chore: more launch builder style function (#7897) --- crates/node-builder/src/launch/common.rs | 22 ++++++++++++++++++++++ crates/node-builder/src/launch/mod.rs | 23 +++++++++++++---------- 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/crates/node-builder/src/launch/common.rs b/crates/node-builder/src/launch/common.rs index f4d2a931c..c57e12cf6 100644 --- a/crates/node-builder/src/launch/common.rs +++ b/crates/node-builder/src/launch/common.rs @@ -145,6 +145,16 @@ impl LaunchContextWith { attachment: Attached::new(self.attachment, attachment), } } + + /// Consumes the type and calls a function with a reference to the context. + // Returns the context again + pub fn inspect(self, f: F) -> Self + where + F: FnOnce(&Self), + { + f(&self); + self + } } impl LaunchContextWith> { @@ -338,6 +348,12 @@ where ) } + /// Convenience function to [Self::init_genesis] + pub fn with_genesis(self) -> Result { + init_genesis(self.provider_factory().clone())?; + Ok(self) + } + /// Write the genesis block and state if it has not already been written pub fn init_genesis(&self) -> Result { init_genesis(self.provider_factory().clone()) @@ -352,6 +368,12 @@ where self.node_config().max_block(client, self.provider_factory().clone()).await } + /// Convenience function to [Self::start_prometheus_endpoint] + pub async fn with_prometheus(self) -> eyre::Result { + self.start_prometheus_endpoint().await?; + Ok(self) + } + /// Starts the prometheus endpoint. 
pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { let prometheus_handle = self.node_config().install_prometheus_recorder()?; diff --git a/crates/node-builder/src/launch/mod.rs b/crates/node-builder/src/launch/mod.rs index 00304816c..408e47cd7 100644 --- a/crates/node-builder/src/launch/mod.rs +++ b/crates/node-builder/src/launch/mod.rs @@ -95,6 +95,7 @@ where config, } = target; + // setup the launch context let ctx = ctx .with_configured_globals() // load the toml config @@ -104,16 +105,18 @@ where // ensure certain settings take effect .with_adjusted_configs() // Create the provider factory - .with_provider_factory()?; - - info!(target: "reth::cli", "Database opened"); - - ctx.start_prometheus_endpoint().await?; - - debug!(target: "reth::cli", chain=%ctx.chain_id(), genesis=?ctx.genesis_hash(), "Initializing genesis"); - ctx.init_genesis()?; - - info!(target: "reth::cli", "\n{}", ctx.chain_spec().display_hardforks()); + .with_provider_factory()? + .inspect(|_| { + info!(target: "reth::cli", "Database opened"); + }) + .with_prometheus().await? + .inspect(|this| { + debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); + }) + .with_genesis()? 
+ .inspect(|this| { + info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); + }); // setup the consensus instance let consensus: Arc = if ctx.is_dev() { From e2e5201d8a0599116a0e73a818062bf6958efda2 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 25 Apr 2024 21:50:38 +0200 Subject: [PATCH 066/250] chore(deps): bump enr, discv5, secp256k1 (#7000) Co-authored-by: Emilia Hane --- Cargo.lock | 75 +++---- Cargo.toml | 16 +- .../interfaces/src/test_utils/generators.rs | 14 +- crates/net/discv4/Cargo.toml | 3 +- crates/net/discv4/src/lib.rs | 4 +- crates/net/discv4/src/proto.rs | 183 ++++-------------- crates/net/discv5/Cargo.toml | 3 +- crates/net/discv5/src/enr.rs | 15 +- crates/net/discv5/src/lib.rs | 3 +- crates/net/dns/Cargo.toml | 5 +- crates/net/dns/src/lib.rs | 64 +++++- crates/net/dns/src/tree.rs | 2 +- crates/net/ecies/src/algorithm.rs | 6 +- crates/net/network/Cargo.toml | 6 +- crates/primitives/Cargo.toml | 8 +- crates/primitives/src/genesis.rs | 24 +-- crates/primitives/src/transaction/mod.rs | 4 +- crates/primitives/src/transaction/util.rs | 4 +- 18 files changed, 169 insertions(+), 270 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0df1d9fd3..901d836cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2383,11 +2383,13 @@ dependencies = [ [[package]] name = "discv5" -version = "0.4.1" -source = "git+https://github.com/sigp/discv5?rev=04ac004#04ac0042a345a9edf93b090007e5d31c008261ed" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cafb8ed8d460b7d1c8d4c970270d45ecb5e283179a3945143196624c55cda6ac" dependencies = [ "aes 0.7.5", "aes-gcm", + "alloy-rlp", "arrayvec", "delay_map", "enr", @@ -2402,7 +2404,6 @@ dependencies = [ "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", - "rlp", "smallvec", "socket2 0.4.10", "tokio", @@ -2577,10 +2578,11 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" [[package]] name = 
"enr" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" +checksum = "4ab656b89cdd15051d92d0931888103508de14ef9e51177c86d478dfa551ce0f" dependencies = [ + "alloy-rlp", "base64 0.21.7", "bytes", "ed25519-dalek", @@ -2588,8 +2590,7 @@ dependencies = [ "k256", "log", "rand 0.8.5", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "serde", "sha3", "zeroize", @@ -4561,7 +4562,7 @@ dependencies = [ "reth-eth-wire", "reth-network", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "tokio", ] @@ -5445,7 +5446,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde_json", "tokio", "tokio-stream", @@ -6288,7 +6289,7 @@ dependencies = [ "reth-net-nat", "reth-network", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "serde", "tempfile", "toml", @@ -6365,8 +6366,7 @@ dependencies = [ "reth-net-nat", "reth-primitives", "reth-tracing", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "serde", "thiserror", "tokio", @@ -6391,8 +6391,7 @@ dependencies = [ "reth-metrics", "reth-primitives", "reth-tracing", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "thiserror", "tokio", "tracing", @@ -6407,11 +6406,12 @@ dependencies = [ "enr", "linked_hash_set", "parking_lot 0.12.1", + "rand 0.8.5", "reth-net-common", "reth-primitives", "reth-tracing", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_with", "thiserror", @@ -6474,7 +6474,7 @@ dependencies = [ "reth-provider", "reth-rpc", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde_json", "tokio", "tokio-stream", @@ -6501,7 +6501,7 @@ dependencies = [ "rand 0.8.5", "reth-net-common", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "sha2 0.10.8", "sha3", "thiserror", @@ -6545,7 +6545,7 @@ dependencies = [ "reth-net-common", "reth-primitives", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde", "snap", 
"test-fuzz", @@ -6572,7 +6572,7 @@ dependencies = [ "reth-net-common", "reth-primitives", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde", "test-fuzz", "thiserror", @@ -6700,7 +6700,7 @@ dependencies = [ "reth-eth-wire-types", "reth-network-api", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "thiserror", "tokio", "tracing", @@ -6849,7 +6849,7 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "serial_test", @@ -6995,7 +6995,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "shellexpand", @@ -7189,7 +7189,7 @@ dependencies = [ "revm", "revm-primitives", "roaring", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "serde_with", @@ -7315,7 +7315,7 @@ dependencies = [ "revm-inspectors", "revm-primitives", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "tempfile", @@ -7438,7 +7438,7 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.8.5", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "serde_with", @@ -7718,7 +7718,7 @@ dependencies = [ "once_cell", "revm-primitives", "ripemd", - "secp256k1 0.28.2", + "secp256k1", "sha2 0.10.8", "substrate-bn", ] @@ -8151,17 +8151,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" -dependencies = [ - "rand 0.8.5", - "secp256k1-sys 0.8.1", - "serde", -] - [[package]] name = "secp256k1" version = "0.28.2" @@ -8169,16 +8158,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "rand 0.8.5", - "secp256k1-sys 0.9.2", -] - -[[package]] -name = "secp256k1-sys" -version = "0.8.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" -dependencies = [ - "cc", + "secp256k1-sys", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 04e26fc44..954fd85d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -272,13 +272,8 @@ reth-trie-parallel = { path = "crates/trie-parallel" } reth-node-events = { path = "crates/node/events" } # revm -revm = { version = "8.0.0", features = [ - "std", - "secp256k1", -], default-features = false } -revm-primitives = { version = "3.1.0", features = [ - "std", -], default-features = false } +revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } +revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "dc614ee" } # eth @@ -359,7 +354,7 @@ http = "0.2.8" http-body = "0.4.5" # p2p -discv5 = { git = "https://github.com/sigp/discv5", rev = "04ac004" } +discv5 = "0.6.0" igd-next = "0.14.3" # rpc @@ -368,11 +363,12 @@ jsonrpsee-core = "0.22" jsonrpsee-types = "0.22" # crypto -secp256k1 = { version = "0.27.0", default-features = false, features = [ +secp256k1 = { version = "0.28", default-features = false, features = [ "global-context", "recovery", ] } -enr = { version = "=0.10.0", default-features = false, features = ["k256"] } +# TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 +enr = { version = "0.12.0", default-features = false, features = ["k256", "rust-secp256k1"] } # for eip-4844 c-kzg = "1.0.0" diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index 0f1930b60..506358276 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -6,7 +6,7 @@ use reth_primitives::{ proofs, sign_message, Account, Address, BlockNumber, Bytes, Header, Log, Receipt, SealedBlock, 
SealedHeader, StorageEntry, Transaction, TransactionSigned, TxKind, TxLegacy, B256, U256, }; -use secp256k1::{KeyPair, Secp256k1}; +use secp256k1::{Keypair, Secp256k1}; use std::{ cmp::{max, min}, collections::{hash_map::DefaultHasher, BTreeMap}, @@ -91,22 +91,22 @@ pub fn random_tx(rng: &mut R) -> Transaction { /// - There is no guarantee that the nonce is not used twice for the same account pub fn random_signed_tx(rng: &mut R) -> TransactionSigned { let secp = Secp256k1::new(); - let key_pair = KeyPair::new(&secp, rng); + let key_pair = Keypair::new(&secp, rng); let tx = random_tx(rng); sign_tx_with_key_pair(key_pair, tx) } /// Signs the [Transaction] with the given key pair. -pub fn sign_tx_with_key_pair(key_pair: KeyPair, tx: Transaction) -> TransactionSigned { +pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionSigned { let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); TransactionSigned::from_transaction_and_signature(tx, signature) } -/// Generates a set of [KeyPair]s based on the desired count. -pub fn generate_keys(rng: &mut R, count: usize) -> Vec { +/// Generates a set of [Keypair]s based on the desired count. 
+pub fn generate_keys(rng: &mut R, count: usize) -> Vec { let secp = Secp256k1::new(); - (0..count).map(|_| KeyPair::new(&secp, rng)).collect() + (0..count).map(|_| Keypair::new(&secp, rng)).collect() } /// Generate a random block filled with signed transactions (generated using @@ -404,7 +404,7 @@ mod tests { let signature_hash = tx.signature_hash(); for _ in 0..100 { - let key_pair = KeyPair::new(&secp, &mut rand::thread_rng()); + let key_pair = Keypair::new(&secp, &mut rand::thread_rng()); let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash) diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index fa0e284ff..9a7cb943d 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -21,8 +21,7 @@ reth-net-nat.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } -rlp = "0.5" # needed for enr +enr.workspace = true # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 48e25c163..3ac6bfa8c 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -39,7 +39,7 @@ use discv5::{ }; use enr::Enr; use parking_lot::Mutex; -use proto::{EnrRequest, EnrResponse, EnrWrapper}; +use proto::{EnrRequest, EnrResponse}; use reth_primitives::{bytes::Bytes, hex, ForkId, PeerId, B256}; use secp256k1::SecretKey; use std::{ @@ -1279,7 +1279,7 @@ impl Discv4Service { self.send_packet( Message::EnrResponse(EnrResponse { request_hash, - enr: EnrWrapper::new(self.local_eip_868_enr.clone()), + enr: self.local_eip_868_enr.clone(), }), remote_addr, ); diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 
8bbb84b62..bdca3bfb4 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,10 +1,8 @@ //! Discovery v4 protocol implementation. use crate::{error::DecodePacketError, EnrForkIdEntry, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; -use alloy_rlp::{ - length_of_length, Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable, -}; -use enr::{Enr, EnrKey}; +use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; +use enr::Enr; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, keccak256, pk2id, ForkId, NodeRecord, B256, @@ -112,8 +110,7 @@ impl Message { // Sign the payload with the secret key using recoverable ECDSA let signature: RecoverableSignature = SECP256K1.sign_ecdsa_recoverable( - &secp256k1::Message::from_slice(keccak256(&payload).as_ref()) - .expect("B256.len() == MESSAGE_SIZE"), + &secp256k1::Message::from_digest(keccak256(&payload).0), secret_key, ); @@ -158,7 +155,7 @@ impl Message { let recoverable_sig = RecoverableSignature::from_compact(signature, recovery_id)?; // recover the public key - let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_slice())?; + let msg = secp256k1::Message::from_digest(keccak256(&packet[97..]).0); let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?; let node_id = pk2id(&pk); @@ -234,85 +231,6 @@ pub struct Neighbours { pub expire: u64, } -/// Passthrough newtype to [`Enr`]. -/// -/// We need to wrap the ENR type because of Rust's orphan rules not allowing -/// implementing a foreign trait on a foreign type. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct EnrWrapper(Enr); - -impl EnrWrapper { - /// Creates a new instance of [`EnrWrapper`]. 
- pub fn new(enr: Enr) -> Self { - EnrWrapper(enr) - } -} - -impl Encodable for EnrWrapper -where - K: EnrKey, -{ - fn encode(&self, out: &mut dyn BufMut) { - let payload_length = self.0.signature().length() + - self.0.seq().length() + - self.0.iter().fold(0, |acc, (k, v)| acc + k.as_slice().length() + v.len()); - - let header = Header { list: true, payload_length }; - header.encode(out); - - self.0.signature().encode(out); - self.0.seq().encode(out); - - for (k, v) in self.0.iter() { - // Keys are byte data - k.as_slice().encode(out); - // Values are raw RLP encoded data - out.put_slice(v); - } - } - - fn length(&self) -> usize { - let payload_length = self.0.signature().length() + - self.0.seq().length() + - self.0.iter().fold(0, |acc, (k, v)| acc + k.as_slice().length() + v.len()); - payload_length + length_of_length(payload_length) - } -} - -fn to_alloy_rlp_error(e: rlp::DecoderError) -> RlpError { - match e { - rlp::DecoderError::RlpIsTooShort => RlpError::InputTooShort, - rlp::DecoderError::RlpInvalidLength => RlpError::Overflow, - rlp::DecoderError::RlpExpectedToBeList => RlpError::UnexpectedString, - rlp::DecoderError::RlpExpectedToBeData => RlpError::UnexpectedList, - rlp::DecoderError::RlpDataLenWithZeroPrefix | - rlp::DecoderError::RlpListLenWithZeroPrefix => RlpError::LeadingZero, - rlp::DecoderError::RlpInvalidIndirection => RlpError::NonCanonicalSize, - rlp::DecoderError::RlpIncorrectListLen => { - RlpError::Custom("incorrect list length when decoding rlp") - } - rlp::DecoderError::RlpIsTooBig => RlpError::Custom("rlp is too big"), - rlp::DecoderError::RlpInconsistentLengthAndData => { - RlpError::Custom("inconsistent length and data when decoding rlp") - } - rlp::DecoderError::Custom(s) => RlpError::Custom(s), - } -} - -impl Decodable for EnrWrapper { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let enr = as rlp::Decodable>::decode(&rlp::Rlp::new(buf)) - .map_err(to_alloy_rlp_error) - .map(EnrWrapper::new); - if enr.is_ok() { - // Decode was 
successful, advance buffer - let header = Header::decode(buf)?; - buf.advance(header.payload_length); - } - enr - } -} - /// A [ENRRequest packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrrequest-packet-0x05). /// /// This packet is used to request the current version of a node's Ethereum Node Record (ENR). @@ -327,12 +245,12 @@ pub struct EnrRequest { /// /// This packet is used to respond to an ENRRequest packet and includes the requested ENR along with /// the hash of the original request. -#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable)] +#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] pub struct EnrResponse { /// The hash of the ENRRequest packet being replied to. pub request_hash: B256, /// The ENR (Ethereum Node Record) for the responding node. - pub enr: EnrWrapper, + pub enr: Enr, } // === impl EnrResponse === @@ -342,37 +260,11 @@ impl EnrResponse { /// /// See also pub fn eth_fork_id(&self) -> Option { - let mut maybe_fork_id = self.enr.0.get_raw_rlp(b"eth")?; + let mut maybe_fork_id = self.enr.get_raw_rlp(b"eth")?; EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(|entry| entry.fork_id) } } -impl Decodable for EnrResponse { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let b = &mut &**buf; - let rlp_head = Header::decode(b)?; - if !rlp_head.list { - return Err(RlpError::UnexpectedString) - } - // let started_len = b.len(); - let this = Self { - request_hash: alloy_rlp::Decodable::decode(b)?, - enr: EnrWrapper::::decode(b)?, - }; - // TODO: `Decodable` can be derived once we have native alloy_rlp decoding for ENR: - // Skipping the size check here is fine since the `buf` is the UDP datagram - // let consumed = started_len - b.len(); - // if consumed != rlp_head.payload_length { - // return Err(alloy_rlp::Error::ListLengthMismatch { - // expected: rlp_head.payload_length, - // got: consumed, - // }) - // } - *buf = *b; - Ok(this) - } -} - /// Represents a Ping packet. 
/// /// A [Ping packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#ping-packet-0x01). @@ -750,7 +642,6 @@ mod tests { #[test] fn encode_decode_enr_msg() { - use self::EnrWrapper; use alloy_rlp::Decodable; use enr::secp256k1::SecretKey; use std::net::Ipv4Addr; @@ -770,7 +661,7 @@ mod tests { let forkentry = EnrForkIdEntry { fork_id }; forkentry.encode(&mut buf); builder.add_value_rlp("eth", buf.into()); - EnrWrapper::new(builder.build(&key).unwrap()) + builder.build(&key).unwrap() }; let enr_response = EnrResponse { request_hash: rng.gen(), enr }; @@ -789,30 +680,25 @@ mod tests { #[test] fn encode_known_rlp_enr() { - use self::EnrWrapper; use alloy_rlp::Decodable; use enr::{secp256k1::SecretKey, EnrPublicKey}; use std::net::Ipv4Addr; - let valid_record = - hex!("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f" - ); - let signature = - hex!("7098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c" - ); + let valid_record = hex!("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f"); + let signature = hex!("7098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c"); let expected_pubkey = hex!("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"); - let enr = EnrWrapper::::decode(&mut &valid_record[..]).unwrap(); - let pubkey = enr.0.public_key().encode(); + let enr = Enr::::decode(&mut &valid_record[..]).unwrap(); + let pubkey = enr.public_key().encode(); - 
assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); - assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); - assert_eq!(enr.0.tcp4(), None); - assert_eq!(enr.0.signature(), &signature[..]); + assert_eq!(enr.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); + assert_eq!(enr.id(), Some(String::from("v4"))); + assert_eq!(enr.udp4(), Some(DEFAULT_DISCOVERY_PORT)); + assert_eq!(enr.tcp4(), None); + assert_eq!(enr.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); - assert!(enr.0.verify()); + assert!(enr.verify()); assert_eq!(&alloy_rlp::encode(&enr)[..], &valid_record[..]); @@ -833,19 +719,19 @@ mod tests { hex!("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"); let mut valid_record_buf = valid_record.as_slice(); - let enr = EnrWrapper::::decode(&mut valid_record_buf).unwrap(); - let pubkey = enr.0.public_key().encode(); + let enr = Enr::::decode(&mut valid_record_buf).unwrap(); + let pubkey = enr.public_key().encode(); // Byte array must be consumed after enr has finished decoding assert!(valid_record_buf.is_empty()); - assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); - assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); - assert_eq!(enr.0.tcp4(), None); - assert_eq!(enr.0.signature(), &signature[..]); + assert_eq!(enr.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); + assert_eq!(enr.id(), Some(String::from("v4"))); + assert_eq!(enr.udp4(), Some(DEFAULT_DISCOVERY_PORT)); + assert_eq!(enr.tcp4(), None); + assert_eq!(enr.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); - assert!(enr.0.verify()); + assert!(enr.verify()); } // test vector from the enr library rlp encoding tests @@ -863,20 +749,23 @@ mod tests { let mut builder = Enr::builder(); builder.ip(ip.into()); builder.tcp4(tcp); - EnrWrapper::new(builder.build(&key).unwrap()) + builder.build(&key).unwrap() }; let mut encoded_bytes 
= &alloy_rlp::encode(&enr)[..]; - let decoded_enr = EnrWrapper::::decode(&mut encoded_bytes).unwrap(); + let decoded_enr = Enr::::decode(&mut encoded_bytes).unwrap(); // Byte array must be consumed after enr has finished decoding assert!(encoded_bytes.is_empty()); assert_eq!(decoded_enr, enr); - assert_eq!(decoded_enr.0.id(), Some("v4".into())); - assert_eq!(decoded_enr.0.ip4(), Some(ip)); - assert_eq!(decoded_enr.0.tcp4(), Some(tcp)); - assert_eq!(decoded_enr.0.public_key().encode(), key.public().encode()); - assert!(decoded_enr.0.verify()); + assert_eq!(decoded_enr.id(), Some("v4".into())); + assert_eq!(decoded_enr.ip4(), Some(ip)); + assert_eq!(decoded_enr.tcp4(), Some(tcp)); + assert_eq!( + decoded_enr.public_key().encode(), + key.public_key(secp256k1::SECP256K1).encode() + ); + assert!(decoded_enr.verify()); } } diff --git a/crates/net/discv5/Cargo.toml b/crates/net/discv5/Cargo.toml index 03b856be9..705ea17a8 100644 --- a/crates/net/discv5/Cargo.toml +++ b/crates/net/discv5/Cargo.toml @@ -18,9 +18,8 @@ reth-metrics.workspace = true # ethereum alloy-rlp.workspace = true -rlp = "0.5.2" discv5 = { workspace = true, features = ["libp2p"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } +enr.workspace = true multiaddr = { version = "0.18", default-features = false } libp2p-identity = "0.2" secp256k1.workspace = true diff --git a/crates/net/discv5/src/enr.rs b/crates/net/discv5/src/enr.rs index b810c1dc6..088baf18e 100644 --- a/crates/net/discv5/src/enr.rs +++ b/crates/net/discv5/src/enr.rs @@ -41,30 +41,25 @@ pub struct EnrCombinedKeyWrapper(pub discv5::Enr); impl From> for EnrCombinedKeyWrapper { fn from(value: Enr) -> Self { - let encoded_enr = rlp::encode(&value); - let enr = rlp::decode::(&encoded_enr).unwrap(); - - Self(enr) + let encoded_enr = alloy_rlp::encode(&value); + Self(alloy_rlp::Decodable::decode(&mut &encoded_enr[..]).unwrap()) } } impl From for Enr { fn from(val: EnrCombinedKeyWrapper) -> Self { - let 
EnrCombinedKeyWrapper(enr) = val; - let encoded_enr = rlp::encode(&enr); - - rlp::decode::>(&encoded_enr).unwrap() + let encoded_enr = alloy_rlp::encode(&val.0); + alloy_rlp::Decodable::decode(&mut &encoded_enr[..]).unwrap() } } #[cfg(test)] mod tests { + use super::*; use alloy_rlp::Encodable; use discv5::enr::{CombinedKey, EnrKey}; use reth_primitives::{Hardfork, NodeRecord, MAINNET}; - use super::*; - #[test] fn discv5_discv4_id_conversion() { let discv5_pk = CombinedKey::generate_secp256k1().public(); diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 7e9fd81b1..de74f3fee 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -377,8 +377,6 @@ impl Discv5 { pub fn on_discv5_update(&mut self, update: discv5::Event) -> Option { match update { discv5::Event::SocketUpdated(_) | discv5::Event::TalkRequest(_) | - // `EnrAdded` not used in discv5 codebase - discv5::Event::EnrAdded { .. } | // `Discovered` not unique discovered peers discv5::Event::Discovered(_) => None, discv5::Event::NodeInserted { replaced: _, .. 
} => { @@ -404,6 +402,7 @@ impl Discv5 { self.on_discovered_peer(&enr, remote_socket) } + _ => None, } } diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 003a6cad7..18d7bf815 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -17,9 +17,8 @@ reth-primitives.workspace = true reth-net-common.workspace = true # ethereum -alloy-rlp.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } +enr.workspace = true # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } @@ -39,8 +38,10 @@ serde = { workspace = true, optional = true } serde_with = { version = "3.3.0", optional = true } [dev-dependencies] +alloy-rlp.workspace = true tokio = { workspace = true, features = ["sync", "rt", "rt-multi-thread"] } reth-tracing.workspace = true +rand.workspace = true [features] default = ["serde"] diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index 6db9c9ee2..03c72e330 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -392,8 +392,6 @@ pub enum DnsDiscoveryEvent { /// Converts an [Enr] into a [NodeRecord] fn convert_enr_node_record(enr: &Enr) -> Option { - use alloy_rlp::Decodable; - let node_record = NodeRecord { address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, @@ -402,8 +400,7 @@ fn convert_enr_node_record(enr: &Enr) -> Option } .into_ipv4_mapped(); - let mut maybe_fork_id = enr.get(b"eth")?; - let fork_id = ForkId::decode(&mut maybe_fork_id).ok(); + let fork_id = enr.get_decodable::(b"eth").transpose().ok()?; Some(DnsNodeRecordUpdate { node_record, fork_id, enr: enr.clone() }) } @@ -412,12 +409,63 @@ fn convert_enr_node_record(enr: &Enr) -> Option mod tests { use super::*; use crate::tree::TreeRootEntry; - use alloy_rlp::Encodable; + use 
alloy_rlp::{Decodable, Encodable}; use enr::EnrKey; - use reth_primitives::{Chain, Hardfork, MAINNET}; + use reth_primitives::{Chain, ForkHash, Hardfork, MAINNET}; use secp256k1::rand::thread_rng; use std::{future::poll_fn, net::Ipv4Addr}; + #[test] + fn test_convert_enr_node_record() { + // rig + let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); + let enr = Enr::builder() + .ip("127.0.0.1".parse().unwrap()) + .udp4(9000) + .tcp4(30303) + .add_value(b"eth", &MAINNET.latest_fork_id()) + .build(&secret_key) + .unwrap(); + + // test + let node_record_update = convert_enr_node_record(&enr).unwrap(); + + assert_eq!(node_record_update.node_record.address, "127.0.0.1".parse::().unwrap()); + assert_eq!(node_record_update.node_record.tcp_port, 30303); + assert_eq!(node_record_update.node_record.udp_port, 9000); + assert_eq!(node_record_update.fork_id, Some(MAINNET.latest_fork_id())); + assert_eq!(node_record_update.enr, enr); + } + + #[test] + fn test_decode_and_convert_enr_node_record() { + // rig + + let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); + let enr = Enr::builder() + .ip("127.0.0.1".parse().unwrap()) + .udp4(9000) + .tcp4(30303) + .add_value(b"eth", &MAINNET.latest_fork_id()) + .add_value(b"opstack", &ForkId { hash: ForkHash(rand::random()), next: rand::random() }) + .build(&secret_key) + .unwrap(); + + let mut encoded_enr = vec![]; + enr.encode(&mut encoded_enr); + + // test + let decoded_enr = Enr::decode(&mut &encoded_enr[..]).unwrap(); + + let node_record_update = convert_enr_node_record(&decoded_enr).unwrap(); + + assert_eq!(node_record_update.node_record.address, "127.0.0.1".parse::().unwrap()); + assert_eq!(node_record_update.node_record.tcp_port, 30303); + assert_eq!(node_record_update.node_record.udp_port, 9000); + assert_eq!(node_record_update.fork_id, Some(MAINNET.latest_fork_id())); + assert_eq!(node_record_update.enr, enr); + } + #[tokio::test] async fn test_start_root_sync() { 
reth_tracing::init_test_tracing(); @@ -461,10 +509,8 @@ mod tests { resolver.insert(link.domain.clone(), root.to_string()); let mut builder = Enr::builder(); - let mut buf = Vec::new(); let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier).unwrap(); - fork_id.encode(&mut buf); - builder.ip4(Ipv4Addr::LOCALHOST).udp4(30303).tcp4(30303).add_value(b"eth", &buf); + builder.ip4(Ipv4Addr::LOCALHOST).udp4(30303).tcp4(30303).add_value(b"eth", &fork_id); let enr = builder.build(&secret_key).unwrap(); resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64()); diff --git a/crates/net/dns/src/tree.rs b/crates/net/dns/src/tree.rs index 53220f694..614d5f1d2 100644 --- a/crates/net/dns/src/tree.rs +++ b/crates/net/dns/src/tree.rs @@ -22,7 +22,7 @@ use crate::error::{ ParseEntryResult, }; use data_encoding::{BASE32_NOPAD, BASE64URL_NOPAD}; -use enr::{Enr, EnrError, EnrKey, EnrKeyUnambiguous, EnrPublicKey}; +use enr::{Enr, EnrKey, EnrKeyUnambiguous, EnrPublicKey, Error as EnrError}; use reth_primitives::{hex, Bytes}; use secp256k1::SecretKey; #[cfg(feature = "serde")] diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 5dce7fee6..bd1eb1d32 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -399,7 +399,7 @@ impl ECIES { let msg = x ^ self.nonce; let (rec_id, sig) = SECP256K1 .sign_ecdsa_recoverable( - &secp256k1::Message::from_slice(msg.as_slice()).unwrap(), + &secp256k1::Message::from_digest(msg.0), &self.ephemeral_secret_key, ) .serialize_compact(); @@ -473,7 +473,7 @@ impl ECIES { let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key); self.remote_ephemeral_public_key = Some(SECP256K1.recover_ecdsa( - &secp256k1::Message::from_slice((x ^ self.remote_nonce.unwrap()).as_ref()).unwrap(), + &secp256k1::Message::from_digest((x ^ self.remote_nonce.unwrap()).0), &signature, )?); self.ephemeral_shared_secret = @@ -631,7 +631,7 @@ impl ECIES { let tag = 
self.egress_mac.as_mut().unwrap().digest(); out.reserve(ECIES::header_len()); - out.extend_from_slice(&header); + out.extend_from_slice(&header[..]); out.extend_from_slice(tag.as_slice()); } diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index dbf7f5fa2..c06ff1518 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -96,11 +96,7 @@ criterion = { workspace = true, features = ["async_tokio", "html_reports"] } [features] default = ["serde"] serde = ["dep:serde", "dep:humantime-serde", "secp256k1/serde", "enr/serde", "dep:serde_json"] -test-utils = [ - "reth-provider/test-utils", - "dep:tempfile", - "reth-transaction-pool/test-utils", -] +test-utils = ["reth-provider/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils"] geth-tests = [] [[bench]] diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index e3828c913..ab6b44303 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -27,7 +27,7 @@ alloy-trie = { workspace = true, features = ["serde"] } nybbles = { workspace = true, features = ["serde", "rlp"] } alloy-genesis.workspace = true alloy-eips.workspace = true -enr = { workspace = true, features = ["rust-secp256k1"] } +enr.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } @@ -105,12 +105,10 @@ arbitrary = [ "dep:arbitrary", "dep:proptest", "dep:proptest-derive", - "zstd-codec" + "zstd-codec", ] c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:sha2", "dep:tempfile"] -zstd-codec = [ - "dep:zstd" -] +zstd-codec = ["dep:zstd"] clap = ["dep:clap"] optimism = [ "reth-codecs/optimism", diff --git a/crates/primitives/src/genesis.rs b/crates/primitives/src/genesis.rs index 52b24facb..991b01bd7 100644 --- a/crates/primitives/src/genesis.rs +++ b/crates/primitives/src/genesis.rs @@ -13,7 +13,7 @@ mod allocator { use alloy_genesis::GenesisAccount; use secp256k1::{ rand::{thread_rng, 
RngCore}, - KeyPair, Secp256k1, + Keypair, Secp256k1, }; use std::collections::{hash_map::Entry, BTreeMap, HashMap}; @@ -73,9 +73,9 @@ mod allocator { /// Add a funded account to the genesis alloc. /// /// Returns the key pair for the account and the account's address. - pub fn new_funded_account(&mut self, balance: U256) -> (KeyPair, Address) { + pub fn new_funded_account(&mut self, balance: U256) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); @@ -90,9 +90,9 @@ mod allocator { &mut self, balance: U256, code: Bytes, - ) -> (KeyPair, Address) { + ) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert( @@ -110,9 +110,9 @@ mod allocator { &mut self, balance: U256, storage: BTreeMap, - ) -> (KeyPair, Address) { + ) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert( @@ -130,9 +130,9 @@ mod allocator { &mut self, code: Bytes, storage: BTreeMap, - ) -> (KeyPair, Address) { + ) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert( @@ -146,9 +146,9 @@ mod allocator { /// Adds an account with code to the genesis alloc. /// /// Returns the key pair for the account and the account's address. 
- pub fn new_account_with_code(&mut self, code: Bytes) -> (KeyPair, Address) { + pub fn new_account_with_code(&mut self, code: Bytes) -> (Keypair, Address) { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert(address, GenesisAccount::default().with_code(Some(code))); @@ -169,7 +169,7 @@ mod allocator { /// Returns the key pair for the account and the account's address. pub fn add_account(&mut self, account: GenesisAccount) -> Address { let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); + let pair = Keypair::new(&secp, &mut self.rng); let address = public_key_to_address(pair.public_key()); self.alloc.insert(address, account); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index dae6ab076..a4ec978a3 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1755,7 +1755,7 @@ mod tests { use alloy_primitives::{address, b256, bytes}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_codecs::Compact; - use secp256k1::{KeyPair, Secp256k1}; + use secp256k1::{Keypair, Secp256k1}; use std::str::FromStr; #[test] @@ -2048,7 +2048,7 @@ mod tests { tx.set_chain_id(chain_id % (u64::MAX / 2 - 36)); } - let key_pair = KeyPair::new(&secp, &mut rng); + let key_pair = Keypair::new(&secp, &mut rng); let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 638064c12..b4a2db7f6 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -18,7 +18,7 @@ pub(crate) mod secp256k1 { let sig = RecoverableSignature::from_compact(&sig[0..64], RecoveryId::from_i32(sig[64] as i32)?)?; - let public = 
SECP256K1.recover_ecdsa(&Message::from_slice(&msg[..32])?, &sig)?; + let public = SECP256K1.recover_ecdsa(&Message::from_digest(*msg), &sig)?; Ok(public_key_to_address(public)) } @@ -26,7 +26,7 @@ pub(crate) mod secp256k1 { /// Returns the corresponding signature. pub fn sign_message(secret: B256, message: B256) -> Result { let sec = SecretKey::from_slice(secret.as_ref())?; - let s = SECP256K1.sign_ecdsa_recoverable(&Message::from_slice(&message[..])?, &sec); + let s = SECP256K1.sign_ecdsa_recoverable(&Message::from_digest(message.0), &sec); let (rec_id, data) = s.serialize_compact(); let signature = Signature { From adf1d25a891c4b9ad41830e43fd3be55749408a7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 25 Apr 2024 22:31:01 +0200 Subject: [PATCH 067/250] feat(op): cmd init at block (#7784) Co-authored-by: Oliver Nordbjerg Co-authored-by: Roman Krasiuk --- Cargo.lock | 2 + bin/reth/Cargo.toml | 1 + bin/reth/src/cli/mod.rs | 8 +- bin/reth/src/commands/init_state.rs | 107 ++++++++ bin/reth/src/commands/mod.rs | 1 + crates/node-core/Cargo.toml | 7 +- crates/node-core/src/init.rs | 252 +++++++++++++++++- .../bundle_state_with_receipts.rs | 4 +- .../src/bundle_state/state_changes.rs | 1 + .../src/bundle_state/state_reverts.rs | 26 +- 10 files changed, 391 insertions(+), 18 deletions(-) create mode 100644 bin/reth/src/commands/init_state.rs diff --git a/Cargo.lock b/Cargo.lock index 901d836cd..edc487bad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6118,6 +6118,7 @@ dependencies = [ "serde_json", "similar-asserts", "tempfile", + "thiserror", "tikv-jemallocator", "tokio", "toml", @@ -6995,6 +6996,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie", "secp256k1", "serde", "serde_json", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ff2515464..5e47506db 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -104,6 +104,7 @@ itertools.workspace = true rayon.workspace = true boyer-moore-magiclen = "0.2.16" ahash 
= "0.8" +thiserror.workspace = true # p2p discv5.workspace = true diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 34fd09456..9c81b0aec 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -6,8 +6,8 @@ use crate::{ LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, node, node::NoArgs, p2p, - recover, stage, test_vectors, + config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, node, node::NoArgs, + p2p, recover, stage, test_vectors, }, version::{LONG_VERSION, SHORT_VERSION}, }; @@ -145,6 +145,7 @@ impl Cli { runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) } Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), @@ -176,6 +177,9 @@ pub enum Commands { /// Initialize the database from a genesis file. #[command(name = "init")] Init(init_cmd::InitCommand), + /// Initialize the database from a state dump file. + #[command(name = "init-state")] + InitState(init_state::InitStateCommand), /// This syncs RLP encoded blocks from a file. #[command(name = "import")] Import(import::ImportCommand), diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs new file mode 100644 index 000000000..c05f064b3 --- /dev/null +++ b/bin/reth/src/commands/init_state.rs @@ -0,0 +1,107 @@ +//! Command that initializes the node from a genesis file. 
+ +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, +}; +use clap::Parser; +use reth_db::{database::Database, init_db}; +use reth_node_core::init::{init_from_state_dump, init_genesis}; +use reth_primitives::{ChainSpec, B256}; +use reth_provider::ProviderFactory; + +use std::{fs::File, io::BufReader, path::PathBuf, sync::Arc}; +use tracing::info; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct InitStateCommand { + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = chain_help(), + default_value = SUPPORTED_CHAINS[0], + value_parser = genesis_value_parser + )] + chain: Arc, + + /// JSONL file with state dump. + /// + /// Must contain accounts in following format, additional account fields are ignored. Can + /// also contain { "root": \ } as first line. + /// { + /// "balance": "\", + /// "nonce": \, + /// "code": "\", + /// "storage": { + /// "\": "\", + /// .. + /// }, + /// "address": "\", + /// } + /// + /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + /// and including the non-genesis block to init chain at. See 'import' command. 
+ #[arg(long, value_name = "STATE_DUMP_FILE", verbatim_doc_comment, default_value = None)]
+ state: Option,
+
+ #[command(flatten)]
+ db: DatabaseArgs,
+}
+
+impl InitStateCommand {
+ /// Execute the `init-state` command
+ pub async fn execute(self) -> eyre::Result<()> {
+ info!(target: "reth::cli", "reth init starting");
+
+ // add network name to data dir
+ let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
+ let db_path = data_dir.db_path();
+ info!(target: "reth::cli", path = ?db_path, "Opening database");
+ let db = Arc::new(init_db(&db_path, self.db.database_args())?);
+ info!(target: "reth::cli", "Database opened");
+
+ let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?;
+
+ info!(target: "reth::cli", "Writing genesis block");
+
+ let hash = match self.state {
+ Some(path) => init_at_state(path, provider_factory)?,
+ None => init_genesis(provider_factory)?,
+ };
+
+ info!(target: "reth::cli", hash = ?hash, "Genesis block written");
+ Ok(())
+ }
+}
+
+/// Initialize chain with state at specific block, from a file with state dump.
+pub fn init_at_state( + state_dump_path: PathBuf, + factory: ProviderFactory, +) -> eyre::Result { + info!(target: "reth::cli", + path=?state_dump_path, + "Opening state dump"); + + let file = File::open(state_dump_path)?; + let reader = BufReader::new(file); + + init_from_state_dump(reader, factory) +} diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index 278531f71..03d5a8287 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -7,6 +7,7 @@ pub mod dump_genesis; pub mod import; pub mod init_cmd; +pub mod init_state; pub mod node; pub mod p2p; diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index d6df37f09..4bce2908d 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -32,6 +32,7 @@ reth-network-api.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true reth-tasks.workspace = true +reth-trie.workspace = true reth-consensus-common.workspace = true reth-beacon-consensus.workspace = true @@ -71,7 +72,11 @@ hyper.workspace = true tracing.workspace = true # crypto -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", +] } # async futures.workspace = true diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 7f529c2b0..eb513cc40 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -13,14 +13,36 @@ use reth_primitives::{ use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, providers::{StaticFileProvider, StaticFileWriter}, - BlockHashReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, - HistoryWriter, OriginalValuesKnown, ProviderError, ProviderFactory, + BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, + DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, + 
ProviderFactory, }; +use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; +use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, + io::BufRead, + ops::DerefMut, sync::Arc, }; -use tracing::debug; +use tracing::{debug, error, info, trace}; + +/// Default soft limit for number of bytes to read from state dump file, before inserting into +/// database. +/// +/// Default is 1 GB. +pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000; + +/// Approximate number of accounts per 1 GB of state dump file. One account is approximately 3.5 KB +/// +/// Approximate is 285 228 accounts. +// +// (14.05 GB OP mainnet state dump at Bedrock block / 4 007 565 accounts in file > 3.5 KB per +// account) +pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228; + +/// Soft limit for the number of flushed updates after which to log progress summary. +const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000; /// Database initialization error type. #[derive(Debug, thiserror::Error, PartialEq, Eq, Clone)] @@ -34,10 +56,19 @@ pub enum InitDatabaseError { /// Actual genesis hash. database_hash: B256, }, - /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), + /// Computed state root doesn't match state root in state dump file. + #[error( + "state root mismatch, state dump: {expected_state_root}, computed: {computed_state_root}" + )] + SateRootMismatch { + /// Expected state root. + expected_state_root: B256, + /// Actual state root. + computed_state_root: B256, + }, } impl From for InitDatabaseError { @@ -102,6 +133,16 @@ pub fn insert_genesis_state<'a, 'b, DB: Database>( tx: &::TXMut, capacity: usize, alloc: impl Iterator, +) -> ProviderResult<()> { + insert_state::(tx, capacity, alloc, 0) +} + +/// Inserts state at given block into database. 
+pub fn insert_state<'a, 'b, DB: Database>( + tx: &::TXMut, + capacity: usize, + alloc: impl Iterator, + block: u64, ) -> ProviderResult<()> { let mut state_init: BundleStateInit = HashMap::with_capacity(capacity); let mut reverts_init = HashMap::with_capacity(capacity); @@ -149,18 +190,20 @@ pub fn insert_genesis_state<'a, 'b, DB: Database>( ), ); } - let all_reverts_init: RevertsInit = HashMap::from([(0, reverts_init)]); + let all_reverts_init: RevertsInit = HashMap::from([(block, reverts_init)]); let bundle = BundleStateWithReceipts::new_init( state_init, all_reverts_init, contracts.into_iter().collect(), Receipts::new(), - 0, + block, ); bundle.write_to_storage(tx, None, OriginalValuesKnown::Yes)?; + trace!(target: "reth::cli", "Inserted state"); + Ok(()) } @@ -174,6 +217,8 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( alloc.clone().map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); provider.insert_account_for_hashing(alloc_accounts)?; + trace!(target: "reth::cli", "Inserted account hashes"); + let alloc_storage = alloc.filter_map(|(addr, account)| { // only return Some if there is storage account.storage.as_ref().map(|storage| { @@ -188,6 +233,8 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( }); provider.insert_storage_for_hashing(alloc_storage)?; + trace!(target: "reth::cli", "Inserted storage hashes"); + Ok(()) } @@ -195,17 +242,30 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( pub fn insert_genesis_history<'a, 'b, DB: Database>( provider: &DatabaseProviderRW, alloc: impl Iterator + Clone, +) -> ProviderResult<()> { + insert_history::(provider, alloc, 0) +} + +/// Inserts history indices for genesis accounts and storage. 
+pub fn insert_history<'a, 'b, DB: Database>( + provider: &DatabaseProviderRW, + alloc: impl Iterator + Clone, + block: u64, ) -> ProviderResult<()> { let account_transitions = - alloc.clone().map(|(addr, _)| (*addr, vec![0])).collect::>(); + alloc.clone().map(|(addr, _)| (*addr, vec![block])).collect::>(); provider.insert_account_history_index(account_transitions)?; + trace!(target: "reth::cli", "Inserted account history"); + let storage_transitions = alloc .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage))) - .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![0]))) + .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![block]))) .collect::>(); provider.insert_storage_history_index(storage_transitions)?; + trace!(target: "reth::cli", "Inserted storage history"); + Ok(()) } @@ -233,6 +293,182 @@ pub fn insert_genesis_header( Ok(()) } +/// Initialize chain with state at specific block, from reader of state dump. 
+pub fn init_from_state_dump( + mut reader: impl BufRead, + factory: ProviderFactory, +) -> eyre::Result { + let block = factory.last_block_number()?; + let hash = factory.block_hash(block)?.unwrap(); + + debug!(target: "reth::cli", + block, + chain=%factory.chain_spec().chain, + "Initializing state at block" + ); + + let mut total_inserted_accounts = 0; + let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP); + let mut chunk_total_byte_len = 0; + let mut line = String::new(); + + // first line can be state root, then it can be used for verifying against computed state root + reader.read_line(&mut line)?; + let expected_state_root = serde_json::from_str::(&line)?.root; + + trace!(target: "reth::cli", + root=%expected_state_root, + "Read state root from file" + ); + + line.clear(); + + // remaining lines are accounts + let mut provider_rw = factory.provider_rw()?; + while let Ok(n) = reader.read_line(&mut line) { + chunk_total_byte_len += n; + if DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK <= chunk_total_byte_len || n == 0 { + // acc + total_inserted_accounts += accounts.len(); + + info!(target: "reth::cli", + chunk_total_byte_len, + parsed_new_accounts=accounts.len(), + total_inserted_accounts, + "Writing accounts to db" + ); + + // reset + chunk_total_byte_len = 0; + + // use transaction to insert genesis header + insert_genesis_hashes( + &provider_rw, + accounts.iter().map(|(address, account)| (address, account)), + )?; + insert_history( + &provider_rw, + accounts.iter().map(|(address, account)| (address, account)), + block, + )?; + + // block is already written to static files + let tx = provider_rw.deref_mut().tx_mut(); + insert_state::( + tx, + accounts.len(), + accounts.iter().map(|(address, account)| (address, account)), + block, + )?; + + accounts.clear(); + } + + if n == 0 { + break; + } + + let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; + accounts.push((address, genesis_account)); + + 
line.clear(); + } + + // compute and compare state root. this advances the stage checkpoints. + let computed_state_root = compute_state_root(&provider_rw)?; + if computed_state_root != expected_state_root { + error!(target: "reth::cli", + ?computed_state_root, + ?expected_state_root, + "Computed state root does not match state root in state dump" + ); + + Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? + } else { + info!(target: "reth::cli", + ?computed_state_root, + "Computed state root matches state root in state dump" + ); + } + + provider_rw.commit()?; + + Ok(hash) +} + +/// Computes the state root (from scratch) based on the accounts and storages present in the +/// database. +fn compute_state_root(provider: &DatabaseProviderRW) -> eyre::Result { + trace!(target: "reth::cli", "Computing state root"); + + let tx = provider.tx_ref(); + let mut intermediate_state: Option = None; + let mut total_flushed_updates = 0; + + loop { + match StateRootComputer::from_tx(tx) + .with_intermediate_state(intermediate_state) + .root_with_progress()? + { + StateRootProgress::Progress(state, _, updates) => { + let updates_len = updates.len(); + + trace!(target: "reth::cli", + last_account_key = %state.last_account_key, + updates_len, + total_flushed_updates, + "Flushing trie updates" + ); + + intermediate_state = Some(*state); + updates.flush(tx)?; + + total_flushed_updates += updates_len; + + if total_flushed_updates % SOFT_LIMIT_COUNT_FLUSHED_UPDATES == 0 { + info!(target: "reth::cli", + total_flushed_updates, + "Flushing trie updates" + ); + } + } + StateRootProgress::Complete(root, _, updates) => { + let updates_len = updates.len(); + + updates.flush(tx)?; + + total_flushed_updates += updates_len; + + trace!(target: "reth::cli", + %root, + updates_len = updates_len, + total_flushed_updates, + "State root has been computed" + ); + + return Ok(root) + } + } + } +} + +/// Type to deserialize state root from state dump file. 
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +struct StateRoot { + root: B256, +} + +/// An account as in the state dump file. This contains a [`GenesisAccount`] and the account's +/// address. +#[derive(Debug, Serialize, Deserialize)] +struct GenesisAccountWithAddress { + /// The account's balance, nonce, code, and storage. + #[serde(flatten)] + genesis_account: GenesisAccount, + /// The account's address. + address: Address, +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 1153464f7..baf5fa597 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -297,8 +297,8 @@ impl BundleStateWithReceipts { /// files if `static_file_producer` is `Some`. It should be none if there is any kind of /// pruning/filtering over the receipts. /// - /// `omit_changed_check` should be set to true of bundle has some of it data - /// detached, This would make some original values not known. + /// `omit_changed_check` should be set to true if bundle has some of its data detached. This + /// would make some original values not known. 
pub fn write_to_storage( self, tx: &TX, diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs index a62606ded..7f7bde79e 100644 --- a/crates/storage/provider/src/bundle_state/state_changes.rs +++ b/crates/storage/provider/src/bundle_state/state_changes.rs @@ -77,6 +77,7 @@ impl StateChanges { } } } + Ok(()) } } diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 63c5595c5..e61572cf5 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -1,6 +1,6 @@ use rayon::slice::ParallelSliceMut; use reth_db::{ - cursor::{DbCursorRO, DbDupCursorRO, DbDupCursorRW}, + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, models::{AccountBeforeTx, BlockNumberAddress}, tables, transaction::{DbTx, DbTxMut}, @@ -74,15 +74,31 @@ impl StateReverts { // Write account changes tracing::trace!(target: "provider::reverts", "Writing account changes"); let mut account_changeset_cursor = tx.cursor_dup_write::()?; + + // append entries if key is new + let should_append_accounts = + account_changeset_cursor.last()?.map_or(true, |(block_number, _)| { + block_number < first_block || block_number == first_block && block_number == 0 + }); for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; // Sort accounts by address. account_block_reverts.par_sort_by_key(|a| a.0); + for (address, info) in account_block_reverts { - account_changeset_cursor.append_dup( - block_number, - AccountBeforeTx { address, info: info.map(into_reth_acc) }, - )?; + if should_append_accounts { + account_changeset_cursor.append_dup( + block_number, + AccountBeforeTx { address, info: info.map(into_reth_acc) }, + )?; + } else { + // upsert on dupsort tables will append to subkey. 
see implementation of + // DbCursorRW::upsert for reth_db::implementation::mdbx::cursor::Cursor + account_changeset_cursor.upsert( + block_number, + AccountBeforeTx { address, info: info.map(into_reth_acc) }, + )?; + } } } From 7efdbf4924319ff3dc3148f6dd44e5269e338c9f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 25 Apr 2024 23:12:43 +0200 Subject: [PATCH 068/250] fix: account for legacy tx in `try_from` tx to `TransactionSignedEcRecovered` (#7882) --- crates/primitives/src/transaction/mod.rs | 28 ++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index a4ec978a3..76d9b0197 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1726,12 +1726,36 @@ impl TryFrom for TransactionSignedEcRecovered { fn try_from(tx: reth_rpc_types::Transaction) -> Result { let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; + let transaction: Transaction = tx.try_into()?; + TransactionSigned::from_transaction_and_signature( - tx.try_into()?, + transaction.clone(), Signature { r: signature.r, s: signature.s, - odd_y_parity: signature.y_parity.ok_or(ConversionError::MissingYParity)?.0, + odd_y_parity: if let Some(y_parity) = signature.y_parity { + y_parity.0 + } else { + match transaction.tx_type() { + // If the transaction type is Legacy, adjust the v component of the + // signature according to the Ethereum specification + TxType::Legacy => { + // Calculate the new v value based on the EIP-155 formula: + // v = {0,1} + CHAIN_ID * 2 + 35 + !(signature.v - + U256::from(if let Some(chain_id) = transaction.chain_id() { + // If CHAIN_ID is available, calculate the new v value + // accordingly + chain_id.saturating_mul(2).saturating_add(35) + } else { + // If CHAIN_ID is not available, set v = {0,1} + 27 + 27 + })) + .is_zero() + } + _ => 
!signature.v.is_zero(), + } + }, }, ) .try_into_ecrecovered() From 57d09e84beaa4dafa505461db733d672163e4d99 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Fri, 26 Apr 2024 01:10:21 +0200 Subject: [PATCH 069/250] deps: remove `webpki-roots` from `reqwest` deps (#7887) --- Cargo.lock | 10 ++-------- crates/optimism/node/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index edc487bad..027cd7e85 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4052,7 +4052,7 @@ dependencies = [ "tokio-util", "tracing", "url", - "webpki-roots 0.26.1", + "webpki-roots", ] [[package]] @@ -5982,6 +5982,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls 0.21.11", + "rustls-native-certs 0.6.3", "rustls-pemfile 1.0.4", "serde", "serde_json", @@ -5997,7 +5998,6 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.25.4", "winreg 0.50.0", ] @@ -9845,12 +9845,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - [[package]] name = "webpki-roots" version = "0.26.1" diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 36bfe96b5..29a99a961 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -39,7 +39,7 @@ hyper.workspace = true http.workspace = true http-body.workspace = true reqwest = { version = "0.11", default-features = false, features = [ - "rustls-tls", + "rustls-tls-native-roots", ] } tracing.workspace = true From 6425064d07cb440b4e8b47e351b0d10614bb05ff Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 26 Apr 2024 11:24:26 +0200 Subject: [PATCH 070/250] fix: use enrforkid in dns (#7900) Co-authored-by: Emilia Hane --- crates/ethereum-forks/src/forkid.rs | 26 ++++++++++++++++++++++++++ crates/ethereum-forks/src/lib.rs | 4 
+++- crates/net/discv4/src/lib.rs | 23 +---------------------- crates/net/discv4/src/proto.rs | 6 +++--- crates/net/dns/src/lib.rs | 15 ++++++++++----- crates/net/network/src/discovery.rs | 4 ++-- 6 files changed, 45 insertions(+), 33 deletions(-) diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index bb163c86e..3be3e3ab8 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -115,6 +115,32 @@ pub struct ForkId { pub next: u64, } +/// Represents a forward-compatible ENR entry for including the forkid in a node record via +/// EIP-868. Forward compatibility is achieved by allowing trailing fields. +/// +/// See: +/// +/// +/// for how geth implements ForkId values and forward compatibility. +#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] +#[rlp(trailing)] +pub struct EnrForkIdEntry { + /// The inner forkid + pub fork_id: ForkId, +} + +impl From for EnrForkIdEntry { + fn from(fork_id: ForkId) -> Self { + Self { fork_id } + } +} + +impl From for ForkId { + fn from(entry: EnrForkIdEntry) -> Self { + entry.fork_id + } +} + /// Reason for rejecting provided `ForkId`. 
#[derive(Clone, Copy, Debug, Error, PartialEq, Eq, Hash)] pub enum ValidationError { diff --git a/crates/ethereum-forks/src/lib.rs b/crates/ethereum-forks/src/lib.rs index e781fe3a5..6dbec7c38 100644 --- a/crates/ethereum-forks/src/lib.rs +++ b/crates/ethereum-forks/src/lib.rs @@ -20,7 +20,9 @@ mod forkid; mod hardfork; mod head; -pub use forkid::{ForkFilter, ForkFilterKey, ForkHash, ForkId, ForkTransition, ValidationError}; +pub use forkid::{ + EnrForkIdEntry, ForkFilter, ForkFilterKey, ForkHash, ForkId, ForkTransition, ValidationError, +}; pub use hardfork::Hardfork; pub use head::Head; diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 3ac6bfa8c..071b81df9 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -28,7 +28,6 @@ use crate::{ error::{DecodePacketError, Discv4Error}, proto::{FindNode, Message, Neighbours, Packet, Ping, Pong}, }; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use discv5::{ kbucket, kbucket::{ @@ -2174,33 +2173,13 @@ pub enum DiscoveryUpdate { Batch(Vec), } -/// Represents a forward-compatible ENR entry for including the forkid in a node record via -/// EIP-868. Forward compatibility is achieved by allowing trailing fields. -/// -/// See: -/// -/// -/// for how geth implements ForkId values and forward compatibility. 
-#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[rlp(trailing)] -pub struct EnrForkIdEntry { - /// The inner forkid - pub fork_id: ForkId, -} - -impl From for EnrForkIdEntry { - fn from(fork_id: ForkId) -> Self { - Self { fork_id } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{create_discv4, create_discv4_with_config, rng_endpoint, rng_record}; use alloy_rlp::{Decodable, Encodable}; use rand::{thread_rng, Rng}; - use reth_primitives::{hex, mainnet_nodes, ForkHash}; + use reth_primitives::{hex, mainnet_nodes, EnrForkIdEntry, ForkHash}; use std::future::poll_fn; #[tokio::test] diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index bdca3bfb4..059ecc5bb 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,11 +1,11 @@ //! Discovery v4 protocol implementation. -use crate::{error::DecodePacketError, EnrForkIdEntry, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; +use crate::{error::DecodePacketError, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; use enr::Enr; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, - keccak256, pk2id, ForkId, NodeRecord, B256, + keccak256, pk2id, EnrForkIdEntry, ForkId, NodeRecord, B256, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, @@ -261,7 +261,7 @@ impl EnrResponse { /// See also pub fn eth_fork_id(&self) -> Option { let mut maybe_fork_id = self.enr.get_raw_rlp(b"eth")?; - EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(|entry| entry.fork_id) + EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(Into::into) } } diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index 03c72e330..b72a45b31 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -22,7 +22,7 @@ use crate::{ pub use config::DnsDiscoveryConfig; use enr::Enr; use error::ParseDnsEntryError; -use reth_primitives::{pk2id, 
ForkId, NodeRecord}; +use reth_primitives::{pk2id, EnrForkIdEntry, ForkId, NodeRecord}; use schnellru::{ByLength, LruMap}; use secp256k1::SecretKey; use std::{ @@ -400,7 +400,8 @@ fn convert_enr_node_record(enr: &Enr) -> Option } .into_ipv4_mapped(); - let fork_id = enr.get_decodable::(b"eth").transpose().ok()?; + let fork_id = + enr.get_decodable::(b"eth").transpose().ok().flatten().map(Into::into); Some(DnsNodeRecordUpdate { node_record, fork_id, enr: enr.clone() }) } @@ -423,7 +424,7 @@ mod tests { .ip("127.0.0.1".parse().unwrap()) .udp4(9000) .tcp4(30303) - .add_value(b"eth", &MAINNET.latest_fork_id()) + .add_value(b"eth", &EnrForkIdEntry::from(MAINNET.latest_fork_id())) .build(&secret_key) .unwrap(); @@ -446,7 +447,7 @@ mod tests { .ip("127.0.0.1".parse().unwrap()) .udp4(9000) .tcp4(30303) - .add_value(b"eth", &MAINNET.latest_fork_id()) + .add_value(b"eth", &EnrForkIdEntry::from(MAINNET.latest_fork_id())) .add_value(b"opstack", &ForkId { hash: ForkHash(rand::random()), next: rand::random() }) .build(&secret_key) .unwrap(); @@ -510,7 +511,11 @@ mod tests { let mut builder = Enr::builder(); let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier).unwrap(); - builder.ip4(Ipv4Addr::LOCALHOST).udp4(30303).tcp4(30303).add_value(b"eth", &fork_id); + builder + .ip4(Ipv4Addr::LOCALHOST) + .udp4(30303) + .tcp4(30303) + .add_value(b"eth", &EnrForkIdEntry::from(fork_id)); let enr = builder.build(&secret_key).unwrap(); resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64()); diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index b7a1131b4..d95f2f957 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -7,12 +7,12 @@ use crate::{ }; use enr::Enr; use futures::StreamExt; -use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config, EnrForkIdEntry}; +use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config}; use reth_discv5::{DiscoveredPeer, Discv5}; use 
reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord, PeerId}; use secp256k1::SecretKey; use std::{ collections::VecDeque, From fd46df069ee5c9478a9cc689320e0aeafa9a529f Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Fri, 26 Apr 2024 12:29:43 +0300 Subject: [PATCH 071/250] feat: move db-access example into a separate module (#7902) --- Cargo.lock | 11 +++++++++++ Cargo.toml | 1 + examples/Cargo.toml | 4 ---- examples/README.md | 2 +- examples/db-access/Cargo.toml | 16 ++++++++++++++++ examples/{db-access.rs => db-access/src/main.rs} | 0 6 files changed, 29 insertions(+), 5 deletions(-) create mode 100644 examples/db-access/Cargo.toml rename examples/{db-access.rs => db-access/src/main.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index 027cd7e85..b67a7935c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2204,6 +2204,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "db-access" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-db", + "reth-primitives", + "reth-provider", + "reth-rpc-types", +] + [[package]] name = "debug-helper" version = "0.3.13" diff --git a/Cargo.toml b/Cargo.toml index 954fd85d8..cee449b22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,6 +87,7 @@ members = [ "examples/custom-inspector/", "examples/exex/minimal/", "examples/exex/op-bridge/", + "examples/db-access", "testing/ef-tests/", ] default-members = ["bin/reth"] diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 2379e9a0f..02c571786 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -25,10 +25,6 @@ futures.workspace = true async-trait.workspace = true tokio.workspace = true -[[example]] -name = "db-access" -path = "db-access.rs" - [[example]] name = "network" path = "network.rs" \ No newline at end of file diff --git 
a/examples/README.md b/examples/README.md index dcec15d35..574efe961 100644 --- a/examples/README.md +++ b/examples/README.md @@ -38,7 +38,7 @@ to make a PR! | Example | Description | | --------------------------- | --------------------------------------------------------------- | -| [DB access](./db-access.rs) | Illustrates how to access Reth's database in a separate process | +| [DB access](./db-access) | Illustrates how to access Reth's database in a separate process | ## Network diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml new file mode 100644 index 000000000..e447493c2 --- /dev/null +++ b/examples/db-access/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "db-access" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + + +[dependencies] +reth-db.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-rpc-types.workspace = true + + +eyre.workspace = true diff --git a/examples/db-access.rs b/examples/db-access/src/main.rs similarity index 100% rename from examples/db-access.rs rename to examples/db-access/src/main.rs From 4278bc24ca1e1f0d26ab78a1bd0ecf93d1a4e5a2 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 26 Apr 2024 11:49:35 +0200 Subject: [PATCH 072/250] Bump `alloy-dyn-abi` and `alloy-sol-types` (#7903) --- Cargo.lock | 191 +++++++++++++++++++++++++++++------------------------ Cargo.toml | 4 +- 2 files changed, 105 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b67a7935c..863fada9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -118,9 +118,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40646aa7f01e396139cf0d6c3a7475eeb8094a0f41d8199f10860c8aef09d2f1" +checksum = 
"fe6c2674230e94ea98767550b02853bf7024b46f784827be95acfc5f5f1a445f" dependencies = [ "alloy-rlp", "arbitrary", @@ -147,9 +147,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "872f239c15befa27cc4f0d3d82a70b3365c2d0202562bf906eb93b299fa31882" +checksum = "22ab339ca7b4ea9115f0578c941abc80a171edf8e5eadd01e6c4237b68db8083" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -194,9 +194,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a35ddfd27576474322a5869e4c123e5f3e7b2177297c18e4e82ea501cb125b" +checksum = "44294729c145cf7ae65feab544b5b81fb2bb7e2fd060214842eb3989a1e9d882" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -448,9 +448,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452d929748ac948a10481fff4123affead32c553cf362841c5103dd508bdfc16" +checksum = "bef9a94a27345fb31e3fcb5f5e9f592bb4847493b07fa1e47dd9fde2222f2e28" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -467,9 +467,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df64e094f6d2099339f9e82b5b38440b159757b6920878f28316243f8166c8d1" +checksum = "c31fe73cd259527e24dc2dbfe64bc95e5ddfcd2b2731f670a11ff72b2be2c25b" dependencies = [ "alloy-json-abi", "const-hex", @@ -484,18 +484,18 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715f4d09a330cc181fc7c361b5c5c2766408fa59a0bac60349dcb7baabd404cc" +checksum = 
"8c8d6e74e4feeaa2bcfdecfd3da247ab53c67bd654ba1907270c32e02b142331" dependencies = [ "winnow 0.6.6", ] [[package]] name = "alloy-sol-types" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bc2d6dfc2a19fd56644494479510f98b1ee929e04cf0d4aa45e98baa3e545b" +checksum = "afaffed78bfb17526375754931e045f96018aa810844b29c7aef823266dd4b4b" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -814,9 +814,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" +checksum = "4e9eabd7a98fe442131a17c316bd9349c43695e49e730c3c8e12cfb5f4da2693" dependencies = [ "brotli", "flate2", @@ -927,9 +927,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -1269,9 +1269,9 @@ dependencies = [ [[package]] name = "brotli" -version = "4.0.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569" +checksum = "19483b140a7ac7174d34b5a581b406c64f84da5409d3e09cf4fff604f9270e67" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1280,9 +1280,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "3.0.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525" +checksum = "e6221fe77a248b9117d431ad93761222e1cf8ff282d9d1d5d9f53d6299a1cf76" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1853,7 +1853,7 @@ 
dependencies = [ "crossterm_winapi", "libc", "mio", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "signal-hook", "signal-hook-mio", "winapi", @@ -2175,7 +2175,7 @@ dependencies = [ "hashbrown 0.14.3", "lock_api", "once_cell", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -2862,9 +2862,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" +checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e" [[package]] name = "findshlibs" @@ -4008,9 +4008,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -4079,7 +4079,7 @@ dependencies = [ "futures-util", "hyper 0.14.28", "jsonrpsee-types", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "rustc-hash", @@ -4352,7 +4352,7 @@ dependencies = [ "multihash", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "quick-protobuf", "rand 0.8.5", @@ -4407,9 +4407,9 @@ dependencies = [ [[package]] name = "libproc" -version = "0.14.6" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb6497078a4c9c2aca63df56d8dce6eb4381d53a960f781a3a748f7ea97436d" +checksum = "ae9ea4b75e1a81675429dafe43441df1caea70081e82246a8cccf514884a88bb" dependencies = [ "bindgen", "errno", @@ -4514,9 +4514,9 @@ checksum = "f9d642685b028806386b2b6e75685faadd3eb65a85fff7df711ce18446a422da" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -5219,12 +5219,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -5243,15 +5243,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -5502,7 +5502,7 @@ dependencies = [ "log", "nix", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "smallvec", "symbolic-demangle", "tempfile", @@ -5897,11 +5897,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] @@ -6237,7 +6237,7 @@ dependencies = [ "linked_hash_set", "lru", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "reth-consensus", "reth-db", "reth-interfaces", @@ -6372,7 +6372,7 @@ dependencies = [ "discv5", "enr", 
"generic-array", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "reth-net-common", "reth-net-nat", @@ -6417,7 +6417,7 @@ dependencies = [ "data-encoding", "enr", "linked_hash_set", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "reth-net-common", "reth-primitives", @@ -6706,7 +6706,7 @@ dependencies = [ "auto_impl", "clap", "futures", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "reth-consensus", "reth-eth-wire-types", @@ -6751,7 +6751,7 @@ dependencies = [ "indexmap 2.2.6", "libc", "libffi", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pprof", "rand 0.8.5", "rand_xorshift", @@ -6838,7 +6838,7 @@ dependencies = [ "itertools 0.12.1", "linked_hash_set", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "pprof", "rand 0.8.5", @@ -7081,7 +7081,7 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "reqwest 0.11.27", "reth", "reth-basic-payload-builder", @@ -7227,7 +7227,7 @@ dependencies = [ "dashmap", "itertools 0.12.1", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "rayon", @@ -7306,7 +7306,7 @@ dependencies = [ "jsonrpsee", "jsonwebtoken", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "reth-consensus-common", @@ -7536,7 +7536,7 @@ version = "0.2.0-beta.6" dependencies = [ "assert_matches", "clap", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rayon", "reth-db", "reth-interfaces", @@ -7602,7 +7602,7 @@ dependencies = [ "futures-util", "itertools 0.12.1", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "paste", "pprof", "proptest", @@ -7956,9 +7956,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum = 
"70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", @@ -7988,7 +7988,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.3", "subtle", "zeroize", ] @@ -8039,9 +8039,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" [[package]] name = "rustls-webpki" @@ -8055,9 +8055,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -8114,6 +8114,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scc" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.23" @@ -8150,6 +8159,12 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "sdd" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b84345e4c9bd703274a082fb80caaa99b7612be48dfaa1dd9266577ec412309d" + [[package]] name = "sec1" version = "0.7.3" @@ -8315,11 +8330,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" +checksum = "2c85f8e96d1d6857f13768fcbd895fcb06225510022a2774ed8b5150581847b0" 
dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "chrono", "hex", "indexmap 1.9.3", @@ -8333,9 +8348,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655" +checksum = "c8b3a576c4eb2924262d5951a3b737ccaf16c931e39a2810c36f9a7e25575557" dependencies = [ "darling 0.20.8", "proc-macro2", @@ -8345,23 +8360,23 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d" +checksum = "adb86f9315df5df6a70eae0cc22395a44e544a0d8897586820770a35ede74449" dependencies = [ - "dashmap", "futures", - "lazy_static", "log", - "parking_lot 0.12.1", + "once_cell", + "parking_lot 0.12.2", + "scc", "serial_test_derive", ] [[package]] name = "serial_test_derive" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" +checksum = "a9bb72430492e9549b0c4596725c0f82729bff861c45aa8099c0a8e67fc3b721" dependencies = [ "proc-macro2", "quote", @@ -8483,9 +8498,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -8799,9 +8814,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4497156948bd342b52038035a6fa514a89626e37af9d2c52a5e8d8ebcc7ee479" +checksum = 
"70aba06097b6eda3c15f6eebab8a6339e121475bcf08bbe6758807e716c372a1" dependencies = [ "paste", "proc-macro2", @@ -8939,18 +8954,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", @@ -9096,7 +9111,7 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", "socket2 0.5.6", @@ -9496,7 +9511,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "resolv-conf", "smallvec", @@ -9901,11 +9916,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index cee449b22..f3ac31674 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,8 +280,8 @@ revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = # eth alloy-chains = "0.1.15" alloy-primitives = "0.7.1" -alloy-dyn-abi = "0.7.0" -alloy-sol-types = "0.7.0" 
+alloy-dyn-abi = "0.7.1" +alloy-sol-types = "0.7.1" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } From d833f1aed9f54cfee27ab8d6f8d342357ee0260f Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Fri, 26 Apr 2024 15:45:07 +0530 Subject: [PATCH 073/250] feat: add new crate op-beacon-core (#7848) Co-authored-by: Matthias Seitz --- Cargo.lock | 9 ++ Cargo.toml | 4 +- crates/consensus/common/src/validation.rs | 17 +++- crates/optimism/consensus/Cargo.toml | 23 +++++ crates/optimism/consensus/src/lib.rs | 103 ++++++++++++++++++++++ 5 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 crates/optimism/consensus/Cargo.toml create mode 100644 crates/optimism/consensus/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 863fada9c..5262d2c41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7113,6 +7113,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-optimism-consensus" +version = "0.2.0-beta.6" +dependencies = [ + "reth-consensus", + "reth-consensus-common", + "reth-primitives", +] + [[package]] name = "reth-optimism-payload-builder" version = "0.2.0-beta.6" diff --git a/Cargo.toml b/Cargo.toml index f3ac31674..817f82993 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,7 @@ members = [ "crates/ethereum/engine-primitives/", "crates/node-ethereum/", "crates/node-builder/", + "crates/optimism/consensus", "crates/optimism/node/", "crates/optimism/evm/", "crates/node-core/", @@ -222,6 +223,7 @@ reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } +reth-node-events = { path = "crates/node/events" } reth-node-optimism = { path = "crates/optimism/node" } reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = 
"crates/node-core" } @@ -270,7 +272,7 @@ reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } -reth-node-events = { path = "crates/node/events" } +reth-optimism-consensus = { path = "crates/optimism/consensus" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 3ed01f637..06b2303a8 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -3,7 +3,10 @@ use reth_consensus::ConsensusError; use reth_interfaces::RethResult; use reth_primitives::{ - constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, + constants::{ + eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, + MAXIMUM_EXTRA_DATA_SIZE, + }, BlockNumber, ChainSpec, GotExpected, Hardfork, Header, InvalidTransactionError, SealedBlock, SealedHeader, Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, @@ -321,6 +324,18 @@ pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), Cons Ok(()) } +/// Validates the header's extradata according to the beacon consensus rules. +/// +/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. +/// This must be 32 bytes or fewer; formally Hx. 
+pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { + if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { + Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() }) + } else { + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml new file mode 100644 index 000000000..4ebbaa8d8 --- /dev/null +++ b/crates/optimism/consensus/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "reth-optimism-consensus" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-consensus-common.workspace = true +reth-primitives.workspace = true +reth-consensus.workspace = true + +[features] +optimism = [ + "reth-primitives/optimism", +] \ No newline at end of file diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs new file mode 100644 index 000000000..9a905adfa --- /dev/null +++ b/crates/optimism/consensus/src/lib.rs @@ -0,0 +1,103 @@ +//! Optimism Consensus implementation. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. 
+#![cfg(feature = "optimism")] + +use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus_common::{validation, validation::validate_header_extradata}; +use reth_primitives::{ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256}; +use std::{sync::Arc, time::SystemTime}; + +/// Optimism consensus implementation. +/// +/// Provides basic checks as outlined in the execution specs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct OptimismBeaconConsensus { + /// Configuration + chain_spec: Arc, +} + +impl OptimismBeaconConsensus { + /// Create a new instance of [OptimismBeaconConsensus] + /// + /// # Panics + /// + /// If given chain spec is not optimism [ChainSpec::is_optimism] + pub fn new(chain_spec: Arc) -> Self { + assert!(chain_spec.is_optimism(), "optimism consensus only valid for optimism chains"); + Self { chain_spec } + } +} + +impl Consensus for OptimismBeaconConsensus { + fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { + validation::validate_header_standalone(header, &self.chain_spec)?; + Ok(()) + } + + fn validate_header_against_parent( + &self, + header: &SealedHeader, + parent: &SealedHeader, + ) -> Result<(), ConsensusError> { + header.validate_against_parent(parent, &self.chain_spec).map_err(ConsensusError::from)?; + Ok(()) + } + + fn validate_header_with_total_difficulty( + &self, + header: &Header, + _total_difficulty: U256, + ) -> Result<(), ConsensusError> { + // with OP-stack Bedrock activation number determines when TTD (eth Merge) has been reached. + let is_post_merge = self.chain_spec.is_bedrock_active_at_block(header.number); + + if is_post_merge { + if header.nonce != 0 { + return Err(ConsensusError::TheMergeNonceIsNotZero) + } + + if header.ommers_hash != EMPTY_OMMER_ROOT_HASH { + return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty) + } + + // Post-merge, the consensus layer is expected to perform checks such that the block + // timestamp is a function of the slot. 
This is different from pre-merge, where blocks + // are only allowed to be in the future (compared to the system's clock) by a certain + // threshold. + // + // Block validation with respect to the parent should ensure that the block timestamp + // is greater than its parent timestamp. + + // validate header extradata for all networks post merge + validate_header_extradata(header)?; + + // mixHash is used instead of difficulty inside EVM + // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty + } else { + // Check if timestamp is in the future. Clock can drift but this can be consensus issue. + let present_timestamp = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); + + if header.exceeds_allowed_future_timestamp(present_timestamp) { + return Err(ConsensusError::TimestampIsInFuture { + timestamp: header.timestamp, + present_timestamp, + }) + } + } + + Ok(()) + } + + fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validation::validate_block_standalone(block, &self.chain_spec) + } +} From 688ee06e98774596d7f09283fd68f73303f2f0b6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 26 Apr 2024 12:57:42 +0200 Subject: [PATCH 074/250] fix(discv5): fork id list in ENR (#7909) --- crates/net/discv5/src/config.rs | 13 +++++++------ crates/net/discv5/src/lib.rs | 12 +++++++----- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index bf15be861..266b530ef 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -9,7 +9,7 @@ use std::{ use derive_more::Display; use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; -use reth_primitives::{Bytes, ForkId, NodeRecord, MAINNET}; +use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord, MAINNET}; use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, network_key}; @@ -50,7 +50,7 @@ impl ConfigBuilder 
{ let Config { discv5_config, bootstrap_nodes, - fork, + fork: (network_key, fork_id), tcp_port, other_enr_kv_pairs, lookup_interval, @@ -60,7 +60,7 @@ impl ConfigBuilder { Self { discv5_config: Some(discv5_config), bootstrap_nodes, - fork: Some(fork), + fork: Some((network_key, fork_id.fork_id)), tcp_port, other_enr_kv_pairs, lookup_interval: Some(lookup_interval), @@ -160,7 +160,8 @@ impl ConfigBuilder { let discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); - let fork = fork.unwrap_or((network_key::ETH, MAINNET.latest_fork_id())); + let (network_key, fork_id) = fork.unwrap_or((network_key::ETH, MAINNET.latest_fork_id())); + let fork = (network_key, fork_id.into()); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); @@ -188,8 +189,8 @@ pub struct Config { /// Nodes to boot from. pub(super) bootstrap_nodes: HashSet, /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node - /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", ForkId)`. - pub(super) fork: (&'static [u8], ForkId), + /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`. + pub(super) fork: (&'static [u8], EnrForkIdEntry), /// RLPx TCP port to advertise. 
pub(super) tcp_port: u16, /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index de74f3fee..14414abf7 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -17,13 +17,12 @@ use std::{ }; use ::enr::Enr; -use alloy_rlp::Decodable; use discv5::ListenConfig; use enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper}; use futures::future::join_all; use itertools::Itertools; use rand::{Rng, RngCore}; -use reth_primitives::{bytes::Bytes, ForkId, NodeRecord, PeerId}; +use reth_primitives::{bytes::Bytes, EnrForkIdEntry, ForkId, NodeRecord, PeerId}; use secp256k1::SecretKey; use tokio::{sync::mpsc, task}; use tracing::{debug, error, trace}; @@ -489,9 +488,12 @@ impl Discv5 { enr: &discv5::enr::Enr, ) -> Result { let key = self.fork_key; - let mut fork_id_bytes = enr.get_raw_rlp(key).ok_or(Error::ForkMissing(key))?; + let fork_id = enr + .get_decodable::(key) + .ok_or(Error::ForkMissing(key))? + .map(Into::into)?; - Ok(ForkId::decode(&mut fork_id_bytes)?) 
+ Ok(fork_id) } //////////////////////////////////////////////////////////////////////////////////////////////// @@ -834,7 +836,7 @@ mod tests { let (enr, _, _, _) = Discv5::build_local_enr(&sk, &config); let decoded_fork_id = - ForkId::decode(&mut enr.get_raw_rlp(network_key::ETH).unwrap()).unwrap(); + enr.get_decodable::(network_key::ETH).unwrap().map(Into::into).unwrap(); assert_eq!(fork_id, decoded_fork_id); assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 From bb0809ce2e9f3d24a757397051ccd080ec79bd3d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 26 Apr 2024 13:14:27 +0200 Subject: [PATCH 075/250] chore: reuse validation fn (#7911) --- crates/consensus/beacon-core/src/lib.rs | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/crates/consensus/beacon-core/src/lib.rs b/crates/consensus/beacon-core/src/lib.rs index c2a3df6e6..6ced95dbc 100644 --- a/crates/consensus/beacon-core/src/lib.rs +++ b/crates/consensus/beacon-core/src/lib.rs @@ -11,10 +11,10 @@ use reth_consensus::{Consensus, ConsensusError}; use reth_consensus_common::validation; use reth_primitives::{ - constants::MAXIMUM_EXTRA_DATA_SIZE, Chain, ChainSpec, Hardfork, Header, SealedBlock, - SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, + Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; + /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. @@ -87,7 +87,7 @@ impl Consensus for BeaconConsensus { // is greater than its parent timestamp. 
// validate header extradata for all networks post merge - validate_header_extradata(header)?; + validation::validate_header_extradata(header)?; // mixHash is used instead of difficulty inside EVM // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty @@ -111,7 +111,7 @@ impl Consensus for BeaconConsensus { // * If the network is goerli pre-merge, ignore the extradata check, since we do not // support clique. Same goes for OP blocks below Bedrock. if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() { - validate_header_extradata(header)?; + validation::validate_header_extradata(header)?; } } @@ -122,15 +122,3 @@ impl Consensus for BeaconConsensus { validation::validate_block_standalone(block, &self.chain_spec) } } - -/// Validates the header's extradata according to the beacon consensus rules. -/// -/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. -/// This must be 32 bytes or fewer; formally Hx. 
-fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { - Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() }) - } else { - Ok(()) - } -} From 73ea68692c38690fc93b049cb98cd4839976dba8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 26 Apr 2024 13:31:19 +0200 Subject: [PATCH 076/250] chore: move reqwest to workspace dep (#7910) --- Cargo.lock | 34 ++++++++++++++++++++++++--------- Cargo.toml | 1 + crates/optimism/node/Cargo.toml | 4 +--- crates/optimism/node/src/rpc.rs | 2 +- 4 files changed, 28 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5262d2c41..74431b4c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3534,6 +3534,23 @@ dependencies = [ "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", + "rustls 0.22.4", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tower-service", +] + [[package]] name = "hyper-system-resolver" version = "0.5.0" @@ -4100,7 +4117,7 @@ checksum = "ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" dependencies = [ "async-trait", "hyper 0.14.28", - "hyper-rustls", + "hyper-rustls 0.24.2", "jsonrpsee-core", "jsonrpsee-types", "serde", @@ -5984,7 +6001,6 @@ dependencies = [ "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", - "hyper-rustls", "ipnet", "js-sys", "log", @@ -5992,16 +6008,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.11", - "rustls-native-certs 0.6.3", - "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "system-configuration", "tokio", - "tokio-rustls 0.24.1", "tokio-util", "tower-service", "url", @@ -6026,6 +6038,7 @@ dependencies = [ 
"http-body 1.0.0", "http-body-util", "hyper 1.3.1", + "hyper-rustls 0.26.0", "hyper-util", "ipnet", "js-sys", @@ -6034,11 +6047,16 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.1.2", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", + "tokio-rustls 0.25.0", "tower-service", "url", "wasm-bindgen", @@ -7077,12 +7095,10 @@ dependencies = [ "async-trait", "clap", "eyre", - "http 0.2.12", - "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", "parking_lot 0.12.2", - "reqwest 0.11.27", + "reqwest 0.12.4", "reth", "reth-basic-payload-builder", "reth-beacon-consensus", diff --git a/Cargo.toml b/Cargo.toml index 817f82993..bd4fd5fd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -351,6 +351,7 @@ futures = "0.3.26" pin-project = "1.0.12" futures-util = "0.3.25" hyper = "0.14.25" +reqwest = { version = "0.12", default-features = false } tower = "0.4" tower-http = "0.4" http = "0.2.8" diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 29a99a961..be8791c78 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -36,9 +36,7 @@ revm-primitives.workspace = true # async async-trait.workspace = true hyper.workspace = true -http.workspace = true -http-body.workspace = true -reqwest = { version = "0.11", default-features = false, features = [ +reqwest = { workspace = true, default-features = false, features = [ "rustls-tls-native-roots", ] } tracing.workspace = true diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 25a399e18..515e1d8eb 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -95,7 +95,7 @@ impl SequencerClient { self.http_client() .post(self.endpoint()) - .header(http::header::CONTENT_TYPE, "application/json") + .header(reqwest::header::CONTENT_TYPE, "application/json") .body(body) .send() .await From 
b6b2cf816e3f39f0e9963912b48384352a27112b Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Fri, 26 Apr 2024 21:34:34 +0800 Subject: [PATCH 077/250] Add windows Ipc Client implementation (#7187) --- Cargo.lock | 185 ++++++++++++-- crates/node-builder/src/rpc.rs | 1 - crates/node-core/src/args/rpc_server_args.rs | 2 +- crates/rpc/ipc/Cargo.toml | 6 +- crates/rpc/ipc/src/client.rs | 151 ------------ crates/rpc/ipc/src/client/mod.rs | 97 ++++++++ crates/rpc/ipc/src/client/unix.rs | 82 +++++++ crates/rpc/ipc/src/client/win.rs | 82 +++++++ crates/rpc/ipc/src/lib.rs | 1 - crates/rpc/ipc/src/server/connection.rs | 55 +---- crates/rpc/ipc/src/server/future.rs | 129 +--------- crates/rpc/ipc/src/server/mod.rs | 246 ++++++++++--------- crates/rpc/rpc-builder/src/auth.rs | 14 +- crates/rpc/rpc-builder/src/lib.rs | 43 +--- deny.toml | 1 + 15 files changed, 580 insertions(+), 515 deletions(-) delete mode 100644 crates/rpc/ipc/src/client.rs create mode 100644 crates/rpc/ipc/src/client/mod.rs create mode 100644 crates/rpc/ipc/src/client/unix.rs create mode 100644 crates/rpc/ipc/src/client/win.rs diff --git a/Cargo.lock b/Cargo.lock index 74431b4c0..5a68ee773 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -808,10 +808,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", "futures-core", ] +[[package]] +name = "async-channel" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +dependencies = [ + "concurrent-queue", + "event-listener 5.3.0", + "event-listener-strategy 0.5.1", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" version = "0.4.9" @@ -828,14 +841,25 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-lock" +version = "3.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite", +] + [[package]] name = "async-sse" version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6fa871e4334a622afd6bb2f611635e8083a6f5e2936c0f90f37c7ef9856298" dependencies = [ - "async-channel", - "futures-lite", + "async-channel 1.9.0", + "futures-lite 1.13.0", "http-types", "log", "memchr", @@ -864,6 +888,12 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + [[package]] name = "async-trait" version = "0.1.80" @@ -875,6 +905,12 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "attohttpc" version = "0.24.1" @@ -1122,6 +1158,22 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.2.1", + "async-lock", + "async-task", + "fastrand 2.0.2", + "futures-io", + "futures-lite 2.3.0", + "piper", + "tracing", +] + [[package]] name = "blst" version = "0.3.11" @@ -2727,6 +2779,48 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332f51cb23d20b0de8458b86580878211da09bcd4503cb579c225b3d124cabb3" +dependencies = [ + "event-listener 5.3.0", + "pin-project-lite", +] + [[package]] name = "examples" version = "0.0.0" @@ -2992,6 +3086,16 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "futures-core", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -3428,9 +3532,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", - "async-channel", + "async-channel 1.9.0", "base64 0.13.1", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite", "rand 0.7.3", @@ -3951,6 +4055,33 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "interprocess" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"81f2533f3be42fffe3b5e63b71aeca416c1c3bc33e4e27be018521e76b1f38fb" +dependencies = [ + "blocking", + "cfg-if", + "futures-core", + "futures-io", + "intmap", + "libc", + "once_cell", + "rustc_version 0.4.0", + "spinning", + "thiserror", + "to_method", + "tokio", + "winapi", +] + +[[package]] +name = "intmap" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae52f28f45ac2bc96edb7714de995cffc174a395fb0abf5bff453587c980d7b9" + [[package]] name = "intrusive-collections" version = "0.9.6" @@ -5203,20 +5334,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "parity-tokio-ipc" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" -dependencies = [ - "futures", - "libc", - "log", - "rand 0.7.3", - "tokio", - "winapi", -] - [[package]] name = "parking" version = "2.2.0" @@ -5400,6 +5517,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.2", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -6744,9 +6872,10 @@ dependencies = [ "bytes", "futures", "futures-util", + "interprocess", "jsonrpsee", - "parity-tokio-ipc", "pin-project", + "rand 0.8.5", "reth-tracing", "serde_json", "thiserror", @@ -6755,6 +6884,7 @@ dependencies = [ "tokio-util", "tower", "tracing", + "windows-sys 0.52.0", ] [[package]] @@ -8657,6 +8787,15 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spinning" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d4f0e86297cad2658d92a707320d87bf4e6ae1050287f51d19b67ef3f153a7b" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.7.3" @@ -9125,6 +9264,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +[[package]] +name = "to_method" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c4ceeeca15c8384bbc3e011dbd8fccb7f068a440b752b7d9b32ceb0ca0e2e8" + [[package]] name = "tokio" version = "1.37.0" diff --git a/crates/node-builder/src/rpc.rs b/crates/node-builder/src/rpc.rs index d6e2eb0f2..3efeba7f5 100644 --- a/crates/node-builder/src/rpc.rs +++ b/crates/node-builder/src/rpc.rs @@ -301,7 +301,6 @@ where let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| { let addr = handle.local_addr(); if let Some(ipc_endpoint) = handle.ipc_endpoint() { - let ipc_endpoint = ipc_endpoint.path(); info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint,"RPC auth server started"); } else { info!(target: "reth::cli", url=%addr, "RPC auth server started"); diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server_args.rs index 2ac48e2ba..1a60aa31a 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server_args.rs @@ -711,7 +711,7 @@ mod tests { config.ws_address().unwrap(), SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8888)) ); - assert_eq!(config.ipc_endpoint().unwrap().path(), constants::DEFAULT_IPC_ENDPOINT); + assert_eq!(config.ipc_endpoint().unwrap(), constants::DEFAULT_IPC_ENDPOINT); } #[test] diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 21b645409..094fa5759 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -15,7 +15,6 @@ workspace = true # async/net 
futures.workspace = true -parity-tokio-ipc = "0.9.0" tokio = { workspace = true, features = ["net", "time", "rt-multi-thread"] } tokio-util = { workspace = true, features = ["codec"] } tokio-stream.workspace = true @@ -30,7 +29,12 @@ tracing.workspace = true bytes.workspace = true thiserror.workspace = true futures-util = "0.3.30" +interprocess = { version = "1.2.1", features = ["tokio_support"] } + +[target.'cfg(windows)'.dependencies] +windows-sys = { version = "0.52.0", features = ["Win32_Foundation"] } [dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } reth-tracing.workspace = true +rand.workspace = true diff --git a/crates/rpc/ipc/src/client.rs b/crates/rpc/ipc/src/client.rs deleted file mode 100644 index f4454958f..000000000 --- a/crates/rpc/ipc/src/client.rs +++ /dev/null @@ -1,151 +0,0 @@ -//! [`jsonrpsee`] transport adapter implementation for IPC. - -use crate::stream_codec::StreamCodec; -use futures::StreamExt; -use jsonrpsee::{ - async_client::{Client, ClientBuilder}, - core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}, -}; -use std::{ - io, - path::{Path, PathBuf}, -}; -use tokio::{io::AsyncWriteExt, net::UnixStream}; -use tokio_util::codec::FramedRead; - -/// Builder type for [`Client`] -#[derive(Clone, Default, Debug)] -#[non_exhaustive] -pub struct IpcClientBuilder; - -impl IpcClientBuilder { - /// Connects to a IPC socket - pub async fn build(self, path: impl AsRef) -> Result { - let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; - Ok(self.build_with_tokio(tx, rx)) - } - - /// Uses the sender and receiver channels to connect to the socket. - pub fn build_with_tokio(self, sender: S, receiver: R) -> Client - where - S: TransportSenderT + Send, - R: TransportReceiverT + Send, - { - ClientBuilder::default().build_with_tokio(sender, receiver) - } -} - -/// Sending end of IPC transport. 
-#[derive(Debug)] -pub struct Sender { - inner: tokio::net::unix::OwnedWriteHalf, -} - -#[async_trait::async_trait] -impl TransportSenderT for Sender { - type Error = IpcError; - - /// Sends out a request. Returns a Future that finishes when the request has been successfully - /// sent. - async fn send(&mut self, msg: String) -> Result<(), Self::Error> { - Ok(self.inner.write_all(msg.as_bytes()).await?) - } - - async fn send_ping(&mut self) -> Result<(), Self::Error> { - tracing::trace!("send ping - not implemented"); - Err(IpcError::NotSupported) - } - - /// Close the connection. - async fn close(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Receiving end of IPC transport. -#[derive(Debug)] -pub struct Receiver { - inner: FramedRead, -} - -#[async_trait::async_trait] -impl TransportReceiverT for Receiver { - type Error = IpcError; - - /// Returns a Future resolving when the server sent us something back. - async fn receive(&mut self) -> Result { - self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) - } -} - -/// Builder for IPC transport [`Sender`] and [`Receiver`] pair. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct IpcTransportClientBuilder; - -impl IpcTransportClientBuilder { - /// Try to establish the connection. 
- /// - /// ``` - /// use jsonrpsee::{core::client::ClientT, rpc_params}; - /// use reth_ipc::client::IpcClientBuilder; - /// # async fn run_client() -> Result<(), Box> { - /// let client = IpcClientBuilder::default().build("/tmp/my-uds").await?; - /// let response: String = client.request("say_hello", rpc_params![]).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn build(self, path: impl AsRef) -> Result<(Sender, Receiver), IpcError> { - let path = path.as_ref(); - - let stream = UnixStream::connect(path) - .await - .map_err(|err| IpcError::FailedToConnect { path: path.to_path_buf(), err })?; - - let (rhlf, whlf) = stream.into_split(); - - Ok(( - Sender { inner: whlf }, - Receiver { inner: FramedRead::new(rhlf, StreamCodec::stream_incoming()) }, - )) - } -} - -/// Error variants that can happen in IPC transport. -#[derive(Debug, thiserror::Error)] -pub enum IpcError { - /// Operation not supported - #[error("operation not supported")] - NotSupported, - /// Stream was closed - #[error("stream closed")] - Closed, - /// Thrown when failed to establish a socket connection. - #[error("failed to connect to socket {path}: {err}")] - FailedToConnect { - /// The path of the socket. - #[doc(hidden)] - path: PathBuf, - /// The error occurred while connecting. 
- #[doc(hidden)] - err: io::Error, - }, - /// Wrapped IO Error - #[error(transparent)] - Io(#[from] io::Error), -} - -#[cfg(test)] -mod tests { - use super::*; - use parity_tokio_ipc::{dummy_endpoint, Endpoint}; - - #[tokio::test] - async fn test_connect() { - let endpoint = dummy_endpoint(); - let _incoming = Endpoint::new(endpoint.clone()).incoming().unwrap(); - - let (tx, rx) = IpcTransportClientBuilder::default().build(endpoint).await.unwrap(); - let _ = IpcClientBuilder::default().build_with_tokio(tx, rx); - } -} diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs new file mode 100644 index 000000000..8ca4b5406 --- /dev/null +++ b/crates/rpc/ipc/src/client/mod.rs @@ -0,0 +1,97 @@ +//! [`jsonrpsee`] transport adapter implementation for IPC. + +use std::{ + io, + path::{Path, PathBuf}, +}; + +use jsonrpsee::{ + async_client::{Client, ClientBuilder}, + core::client::{TransportReceiverT, TransportSenderT}, +}; + +#[cfg(unix)] +use crate::client::unix::IpcTransportClientBuilder; +#[cfg(windows)] +use crate::client::win::IpcTransportClientBuilder; + +#[cfg(unix)] +mod unix; +#[cfg(windows)] +mod win; + +/// Builder type for [`Client`] +#[derive(Clone, Default, Debug)] +#[non_exhaustive] +pub struct IpcClientBuilder; + +impl IpcClientBuilder { + /// Connects to a IPC socket + /// + /// ``` + /// use jsonrpsee::{core::client::ClientT, rpc_params}; + /// use reth_ipc::client::IpcClientBuilder; + /// # async fn run_client() -> Result<(), Box> { + /// let client = IpcClientBuilder::default().build("/tmp/my-uds").await?; + /// let response: String = client.request("say_hello", rpc_params![]).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn build(self, path: impl AsRef) -> Result { + let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; + Ok(self.build_with_tokio(tx, rx)) + } + + /// Uses the sender and receiver channels to connect to the socket. 
+ pub fn build_with_tokio(self, sender: S, receiver: R) -> Client + where + S: TransportSenderT + Send, + R: TransportReceiverT + Send, + { + ClientBuilder::default().build_with_tokio(sender, receiver) + } +} + +/// Error variants that can happen in IPC transport. +#[derive(Debug, thiserror::Error)] +pub enum IpcError { + /// Operation not supported + #[error("operation not supported")] + NotSupported, + /// Stream was closed + #[error("stream closed")] + Closed, + /// Thrown when failed to establish a socket connection. + #[error("failed to connect to socket {path}: {err}")] + FailedToConnect { + /// The path of the socket. + #[doc(hidden)] + path: PathBuf, + /// The error occurred while connecting. + #[doc(hidden)] + err: io::Error, + }, + /// Wrapped IO Error + #[error(transparent)] + Io(#[from] io::Error), +} + +#[cfg(test)] +mod tests { + use crate::server::dummy_endpoint; + use interprocess::local_socket::tokio::LocalSocketListener; + + use super::*; + + #[tokio::test] + async fn test_connect() { + let endpoint = dummy_endpoint(); + let binding = LocalSocketListener::bind(endpoint.clone()).unwrap(); + tokio::spawn(async move { + let _x = binding.accept().await; + }); + + let (tx, rx) = IpcTransportClientBuilder::default().build(endpoint).await.unwrap(); + let _ = IpcClientBuilder::default().build_with_tokio(tx, rx); + } +} diff --git a/crates/rpc/ipc/src/client/unix.rs b/crates/rpc/ipc/src/client/unix.rs new file mode 100644 index 000000000..c7ed7bc7a --- /dev/null +++ b/crates/rpc/ipc/src/client/unix.rs @@ -0,0 +1,82 @@ +//! [`jsonrpsee`] transport adapter implementation for Unix IPC by using Unix Sockets. 
+ +use crate::{client::IpcError, stream_codec::StreamCodec}; +use futures::StreamExt; +use jsonrpsee::core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}; +use std::path::Path; +use tokio::{ + io::AsyncWriteExt, + net::{ + unix::{OwnedReadHalf, OwnedWriteHalf}, + UnixStream, + }, +}; +use tokio_util::codec::FramedRead; + +/// Sending end of IPC transport. +#[derive(Debug)] +pub(crate) struct Sender { + inner: OwnedWriteHalf, +} + +#[async_trait::async_trait] +impl TransportSenderT for Sender { + type Error = IpcError; + + /// Sends out a request. Returns a Future that finishes when the request has been successfully + /// sent. + async fn send(&mut self, msg: String) -> Result<(), Self::Error> { + Ok(self.inner.write_all(msg.as_bytes()).await?) + } + + async fn send_ping(&mut self) -> Result<(), Self::Error> { + tracing::trace!("send ping - not implemented"); + Err(IpcError::NotSupported) + } + + /// Close the connection. + async fn close(&mut self) -> Result<(), Self::Error> { + Ok(()) + } +} + +/// Receiving end of IPC transport. +#[derive(Debug)] +pub(crate) struct Receiver { + pub(crate) inner: FramedRead, +} + +#[async_trait::async_trait] +impl TransportReceiverT for Receiver { + type Error = IpcError; + + /// Returns a Future resolving when the server sent us something back. + async fn receive(&mut self) -> Result { + self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) + } +} + +/// Builder for IPC transport [`Sender`] and [`Receiver`] pair. 
+#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub(crate) struct IpcTransportClientBuilder; + +impl IpcTransportClientBuilder { + pub(crate) async fn build( + self, + path: impl AsRef, + ) -> Result<(Sender, Receiver), IpcError> { + let path = path.as_ref(); + + let stream = UnixStream::connect(path) + .await + .map_err(|err| IpcError::FailedToConnect { path: path.to_path_buf(), err })?; + + let (rhlf, whlf) = stream.into_split(); + + Ok(( + Sender { inner: whlf }, + Receiver { inner: FramedRead::new(rhlf, StreamCodec::stream_incoming()) }, + )) + } +} diff --git a/crates/rpc/ipc/src/client/win.rs b/crates/rpc/ipc/src/client/win.rs new file mode 100644 index 000000000..69b3140fe --- /dev/null +++ b/crates/rpc/ipc/src/client/win.rs @@ -0,0 +1,82 @@ +//! [`jsonrpsee`] transport adapter implementation for Windows IPC by using NamedPipes. + +use crate::{client::IpcError, stream_codec::StreamCodec}; +use jsonrpsee::core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}; +use std::{path::Path, sync::Arc}; +use tokio::{ + io::AsyncWriteExt, + net::windows::named_pipe::{ClientOptions, NamedPipeClient}, + time, + time::Duration, +}; +use tokio_stream::StreamExt; +use tokio_util::codec::FramedRead; +use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; + +/// Sending end of IPC transport. +#[derive(Debug)] +pub struct Sender { + inner: Arc, +} + +#[async_trait::async_trait] +impl TransportSenderT for Sender { + type Error = IpcError; + + /// Sends out a request. Returns a Future that finishes when the request has been successfully + /// sent. + async fn send(&mut self, msg: String) -> Result<(), Self::Error> { + Ok(self.inner.write_all(msg.as_bytes()).await?) + } + + async fn send_ping(&mut self) -> Result<(), Self::Error> { + tracing::trace!("send ping - not implemented"); + Err(IpcError::NotSupported) + } + + /// Close the connection. + async fn close(&mut self) -> Result<(), Self::Error> { + Ok(()) + } +} + +/// Receiving end of IPC transport. 
+#[derive(Debug)] +pub struct Receiver { + inner: FramedRead, StreamCodec>, +} + +#[async_trait::async_trait] +impl TransportReceiverT for Receiver { + type Error = IpcError; + + /// Returns a Future resolving when the server sent us something back. + async fn receive(&mut self) -> Result { + self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) + } +} + +/// Builder for IPC transport [`crate::client::win::Sender`] and [`crate::client::win::Receiver`] +/// pair. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct IpcTransportClientBuilder; + +impl IpcTransportClientBuilder { + pub async fn build(self, path: impl AsRef) -> Result<(Sender, Receiver), IpcError> { + let addr = path.as_ref().as_os_str(); + let client = loop { + match ClientOptions::new().open(addr) { + Ok(client) => break client, + Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (), + Err(e) => return IpcError::FailedToConnect { path: path.to_path_buf(), err: e }, + } + time::sleep(Duration::from_mills(50)).await; + }; + let client = Arc::new(client); + Ok(( + Sender { inner: client.clone() }, + Receiver { inner: FramedRead::new(client, StreamCodec::stream_incoming()) }, + )) + } +} diff --git a/crates/rpc/ipc/src/lib.rs b/crates/rpc/ipc/src/lib.rs index 2d0193ed6..ae7a8b221 100644 --- a/crates/rpc/ipc/src/lib.rs +++ b/crates/rpc/ipc/src/lib.rs @@ -12,7 +12,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#[cfg(unix)] pub mod client; pub mod server; diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index abeba7bbf..05f7a53a9 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -1,12 +1,11 @@ //! A IPC connection. 
use crate::stream_codec::StreamCodec; -use futures::{ready, stream::FuturesUnordered, FutureExt, Sink, Stream, StreamExt}; +use futures::{stream::FuturesUnordered, FutureExt, Sink, Stream}; use std::{ collections::VecDeque, future::Future, io, - marker::PhantomData, pin::Pin, task::{Context, Poll}, }; @@ -16,58 +15,8 @@ use tower::Service; pub(crate) type JsonRpcStream = Framed; -/// Wraps a stream of incoming connections. #[pin_project::pin_project] -pub(crate) struct Incoming { - #[pin] - inner: T, - _marker: PhantomData, -} -impl Incoming -where - T: Stream> + Unpin + 'static, - Item: AsyncRead + AsyncWrite, -{ - /// Create a new instance. - pub(crate) fn new(inner: T) -> Self { - Self { inner, _marker: Default::default() } - } - - /// Polls to accept a new incoming connection to the endpoint. - pub(crate) fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { - Poll::Ready(ready!(self.poll_next_unpin(cx)).map_or( - Err(io::Error::new(io::ErrorKind::ConnectionAborted, "ipc connection closed")), - |conn| conn, - )) - } -} - -impl Stream for Incoming -where - T: Stream> + 'static, - Item: AsyncRead + AsyncWrite, -{ - type Item = io::Result>>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - let res = match ready!(this.inner.poll_next(cx)) { - Some(Ok(item)) => { - let framed = IpcConn(tokio_util::codec::Decoder::framed( - StreamCodec::stream_incoming(), - item, - )); - Ok(framed) - } - Some(Err(err)) => Err(err), - None => return Poll::Ready(None), - }; - Poll::Ready(Some(res)) - } -} - -#[pin_project::pin_project] -pub(crate) struct IpcConn(#[pin] T); +pub(crate) struct IpcConn(#[pin] pub(crate) T); impl IpcConn> where diff --git a/crates/rpc/ipc/src/server/future.rs b/crates/rpc/ipc/src/server/future.rs index 65aaccc88..f807af449 100644 --- a/crates/rpc/ipc/src/server/future.rs +++ b/crates/rpc/ipc/src/server/future.rs @@ -26,127 +26,9 @@ //! Utilities for handling async code. 
-use futures::FutureExt; -use std::{ - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; -use tokio::{ - sync::{watch, OwnedSemaphorePermit, Semaphore, TryAcquireError}, - time::{self, Duration, Interval}, -}; +use std::sync::Arc; -/// Polling for server stop monitor interval in milliseconds. -const STOP_MONITOR_POLLING_INTERVAL: Duration = Duration::from_millis(1000); - -/// This is a flexible collection of futures that need to be driven to completion -/// alongside some other future, such as connection handlers that need to be -/// handled along with a listener for new connections. -/// -/// In order to `.await` on these futures and drive them to completion, call -/// `select_with` providing some other future, the result of which you need. -pub(crate) struct FutureDriver { - futures: Vec, - stop_monitor_heartbeat: Interval, -} - -impl Default for FutureDriver { - fn default() -> Self { - let mut heartbeat = time::interval(STOP_MONITOR_POLLING_INTERVAL); - - heartbeat.set_missed_tick_behavior(time::MissedTickBehavior::Skip); - - FutureDriver { futures: Vec::new(), stop_monitor_heartbeat: heartbeat } - } -} - -impl FutureDriver { - /// Add a new future to this driver - pub(crate) fn add(&mut self, future: F) { - self.futures.push(future); - } -} - -impl FutureDriver -where - F: Future + Unpin, -{ - pub(crate) async fn select_with(&mut self, selector: S) -> S::Output { - tokio::pin!(selector); - - DriverSelect { selector, driver: self }.await - } - - fn drive(&mut self, cx: &mut Context<'_>) { - let mut i = 0; - - while i < self.futures.len() { - if self.futures[i].poll_unpin(cx).is_ready() { - // Using `swap_remove` since we don't care about ordering, - // but we do care about removing being `O(1)`. 
- // - // We don't increment `i` in this branch, since we now - // have a shorter length, and potentially a new value at - // current index - self.futures.swap_remove(i); - } else { - i += 1; - } - } - } - - fn poll_stop_monitor_heartbeat(&mut self, cx: &mut Context<'_>) { - // We don't care about the ticks of the heartbeat, it's here only - // to periodically wake the `Waker` on `cx`. - let _ = self.stop_monitor_heartbeat.poll_tick(cx); - } -} - -impl Future for FutureDriver -where - F: Future + Unpin, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = Pin::into_inner(self); - - this.drive(cx); - - if this.futures.is_empty() { - Poll::Ready(()) - } else { - Poll::Pending - } - } -} - -/// This is a glorified select `Future` that will attempt to drive all -/// connection futures `F` to completion on each `poll`, while also -/// handling incoming connections. -struct DriverSelect<'a, S, F> { - selector: S, - driver: &'a mut FutureDriver, -} - -impl<'a, R, F> Future for DriverSelect<'a, R, F> -where - R: Future + Unpin, - F: Future + Unpin, -{ - type Output = R::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = Pin::into_inner(self); - - this.driver.drive(cx); - this.driver.poll_stop_monitor_heartbeat(cx); - - this.selector.poll_unpin(cx) - } -} +use tokio::sync::{watch, OwnedSemaphorePermit, Semaphore, TryAcquireError}; #[derive(Debug, Clone)] pub(crate) struct StopHandle(watch::Receiver<()>); @@ -156,12 +38,7 @@ impl StopHandle { Self(rx) } - pub(crate) fn shutdown_requested(&self) -> bool { - // if a message has been seen, it means that `stop` has been called. - self.0.has_changed().unwrap_or(true) - } - - pub(crate) async fn shutdown(&mut self) { + pub(crate) async fn shutdown(mut self) { // Err(_) implies that the `sender` has been dropped. // Ok(_) implies that `stop` has been called. 
let _ = self.0.changed().await; diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 5301c7d21..c876457e1 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -1,14 +1,16 @@ //! JSON-RPC IPC server implementation use crate::server::{ - connection::{Incoming, IpcConn, JsonRpcStream}, - future::{ConnectionGuard, FutureDriver, StopHandle}, + connection::{IpcConn, JsonRpcStream}, + future::{ConnectionGuard, StopHandle}, }; -use futures::{FutureExt, Stream, StreamExt}; +use futures::StreamExt; +use futures_util::{future::Either, stream::FuturesUnordered}; +use interprocess::local_socket::tokio::{LocalSocketListener, LocalSocketStream}; use jsonrpsee::{ core::TEN_MB_SIZE_BYTES, server::{ - middleware::rpc::{either::Either, RpcLoggerLayer, RpcServiceT}, + middleware::rpc::{RpcLoggerLayer, RpcServiceT}, AlreadyStoppedError, IdProvider, RandomIntegerIdProvider, }, BoundedSubscriptions, MethodSink, Methods, @@ -25,16 +27,18 @@ use tokio::{ sync::{oneshot, watch, OwnedSemaphorePermit}, }; use tower::{layer::util::Identity, Layer, Service}; -use tracing::{debug, trace, warn}; - +use tracing::{debug, trace, warn, Instrument}; // re-export so can be used during builder setup -use crate::server::{ - connection::IpcConnDriver, - rpc_service::{RpcService, RpcServiceCfg}, +use crate::{ + server::{ + connection::IpcConnDriver, + rpc_service::{RpcService, RpcServiceCfg}, + }, + stream_codec::StreamCodec, }; -pub use parity_tokio_ipc::Endpoint; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; +use tokio_util::compat::FuturesAsyncReadCompatExt; use tower::layer::{util::Stack, LayerFn}; mod connection; @@ -47,7 +51,7 @@ mod rpc_service; // This is an adapted `jsonrpsee` Server, but for `Ipc` connections. 
pub struct IpcServer { /// The endpoint we listen for incoming transactions - endpoint: Endpoint, + endpoint: String, id_provider: Arc, cfg: Settings, rpc_middleware: RpcServiceBuilder, @@ -55,9 +59,9 @@ pub struct IpcServer { } impl IpcServer { - /// Returns the configured [Endpoint] - pub fn endpoint(&self) -> &Endpoint { - &self.endpoint + /// Returns the configured endpoint + pub fn endpoint(&self) -> String { + self.endpoint.clone() } } @@ -123,15 +127,29 @@ where stop_handle: StopHandle, on_ready: oneshot::Sender>, ) { - trace!(endpoint = ?self.endpoint.path(), "starting ipc server"); + trace!(endpoint = ?self.endpoint, "starting ipc server"); if cfg!(unix) { // ensure the file does not exist - if std::fs::remove_file(self.endpoint.path()).is_ok() { - debug!(endpoint = ?self.endpoint.path(), "removed existing IPC endpoint file"); + if std::fs::remove_file(&self.endpoint).is_ok() { + debug!(endpoint = ?self.endpoint, "removed existing IPC endpoint file"); } } + let listener = match LocalSocketListener::bind(self.endpoint.clone()) { + Err(err) => { + on_ready + .send(Err(IpcServerStartError { endpoint: self.endpoint.clone(), source: err })) + .ok(); + return; + } + + Ok(listener) => listener, + }; + + // signal that we're ready to accept connections + on_ready.send(Ok(())).ok(); + let message_buffer_capacity = self.cfg.message_buffer_capacity; let max_request_body_size = self.cfg.max_request_body_size; let max_response_body_size = self.cfg.max_response_body_size; @@ -142,37 +160,27 @@ where let mut id: u32 = 0; let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize); - let mut connections = FutureDriver::default(); - let endpoint_path = self.endpoint.path().to_string(); - let incoming = match self.endpoint.incoming() { - Ok(connections) => { - #[cfg(windows)] - let connections = Box::pin(connections); - Incoming::new(connections) - } - Err(err) => { - on_ready - .send(Err(IpcServerStartError { endpoint: endpoint_path, source: err })) - 
.ok(); - return - } - }; - // signal that we're ready to accept connections - on_ready.send(Ok(())).ok(); - - let mut incoming = Monitored::new(incoming, &stop_handle); + let mut connections = FuturesUnordered::new(); + let stopped = stop_handle.clone().shutdown(); + tokio::pin!(stopped); trace!("accepting ipc connections"); loop { - match connections.select_with(&mut incoming).await { - Ok(ipc) => { + match try_accept_conn(&listener, stopped).await { + AcceptConnection::Established { local_socket_stream, stop } => { trace!("established new connection"); + let ipc = IpcConn(tokio_util::codec::Decoder::framed( + StreamCodec::stream_incoming(), + local_socket_stream.compat(), + )); + let conn = match connection_guard.try_acquire() { Some(conn) => conn, None => { warn!("Too many IPC connections. Please try again later."); - connections.add(ipc.reject_connection().boxed()); - continue + connections.push(tokio::spawn(ipc.reject_connection().in_current_span())); + stopped = stop; + continue; } }; @@ -198,30 +206,58 @@ where }; let service = self.http_middleware.service(tower_service); - connections.add(Box::pin(spawn_connection( + connections.push(tokio::spawn(process_connection( ipc, service, stop_handle.clone(), rx, - ))); + ).in_current_span())); id = id.wrapping_add(1); + stopped = stop; } - Err(MonitoredError::Selector(err)) => { - tracing::error!("Error while awaiting a new IPC connection: {:?}", err); + AcceptConnection::Shutdown => { break; } + AcceptConnection::Err((e, stop)) => { + tracing::error!("Error while awaiting a new IPC connection: {:?}", e); + stopped = stop; } - Err(MonitoredError::Shutdown) => break, } } - connections.await; + // FuturesUnordered won't poll anything until this line but because the + // tasks are spawned (so that they can progress independently) + // then this just makes sure that all tasks are completed before + // returning from this function. 
+ while connections.next().await.is_some() {} + } +} + +enum AcceptConnection { + Shutdown, + Established { local_socket_stream: LocalSocketStream, stop: S }, + Err((io::Error, S)), +} + +async fn try_accept_conn(listener: &LocalSocketListener, stopped: S) -> AcceptConnection +where + S: Future + Unpin, +{ + let accept = listener.accept(); + tokio::pin!(accept); + + match futures_util::future::select(accept, stopped).await { + Either::Left((res, stop)) => match res { + Ok(local_socket_stream) => AcceptConnection::Established { local_socket_stream, stop }, + Err(e) => AcceptConnection::Err((e, stop)), + }, + Either::Right(_) => AcceptConnection::Shutdown, } } impl std::fmt::Debug for IpcServer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("IpcServer") - .field("endpoint", &self.endpoint.path()) + .field("endpoint", &self.endpoint) .field("cfg", &self.cfg) .field("id_provider", &self.id_provider) .finish() @@ -408,10 +444,10 @@ where } /// Spawns the IPC connection onto a new task -async fn spawn_connection( +async fn process_connection( conn: IpcConn>, service: S, - mut stop_handle: StopHandle, + stop_handle: StopHandle, rx: mpsc::Receiver, ) where S: Service> + Send + 'static, @@ -419,70 +455,34 @@ async fn spawn_connection( S::Future: Send + Unpin, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - let task = tokio::task::spawn(async move { - let rx_item = ReceiverStream::new(rx); - let conn = IpcConnDriver { - conn, - service, - pending_calls: Default::default(), - items: Default::default(), - }; - tokio::pin!(conn, rx_item); - - loop { - tokio::select! { - _ = &mut conn => { - break - } - item = rx_item.next() => { - if let Some(item) = item { - conn.push_back(item); - } - } - _ = stop_handle.shutdown() => { - // shutdown - break - } - } - } - }); - - task.await.ok(); -} - -/// This is a glorified select listening for new messages, while also checking the `stop_receiver` -/// signal. 
-struct Monitored<'a, F> { - future: F, - stop_monitor: &'a StopHandle, -} - -impl<'a, F> Monitored<'a, F> { - fn new(future: F, stop_monitor: &'a StopHandle) -> Self { - Monitored { future, stop_monitor } - } -} - -enum MonitoredError { - Shutdown, - Selector(E), -} + let rx_item = ReceiverStream::new(rx); + let conn = IpcConnDriver { + conn, + service, + pending_calls: Default::default(), + items: Default::default(), + }; + tokio::pin!(conn, rx_item); -impl<'a, T, Item> Future for Monitored<'a, Incoming> -where - T: Stream> + Unpin + 'static, - Item: AsyncRead + AsyncWrite, -{ - type Output = Result>, MonitoredError>; + let stopped = stop_handle.shutdown(); - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); + tokio::pin!(stopped); - if this.stop_monitor.shutdown_requested() { - return Poll::Ready(Err(MonitoredError::Shutdown)) + loop { + tokio::select! { + _ = &mut conn => { + break + } + item = rx_item.next() => { + if let Some(item) = item { + conn.push_back(item); + } + } + _ = &mut stopped=> { + // shutdown + break + } } - - this.future.poll_accept(cx).map_err(MonitoredError::Selector) } } @@ -734,17 +734,8 @@ impl Builder { /// Finalize the configuration of the server. Consumes the [`Builder`]. pub fn build(self, endpoint: impl AsRef) -> IpcServer { - let endpoint = Endpoint::new(endpoint.as_ref().to_string()); - self.build_with_endpoint(endpoint) - } - - /// Finalize the configuration of the server. Consumes the [`Builder`]. 
- pub fn build_with_endpoint( - self, - endpoint: Endpoint, - ) -> IpcServer { IpcServer { - endpoint, + endpoint: endpoint.as_ref().to_string(), cfg: self.settings, id_provider: self.id_provider, http_middleware: self.http_middleware, @@ -782,7 +773,18 @@ impl ServerHandle { } } -#[cfg(all(test, unix))] +/// For testing/examples +#[cfg(test)] +pub fn dummy_endpoint() -> String { + let num: u64 = rand::Rng::gen(&mut rand::thread_rng()); + if cfg!(windows) { + format!(r"\\.\pipe\my-pipe-{}", num) + } else { + format!(r"/tmp/my-uds-{}", num) + } +} + +#[cfg(test)] mod tests { use super::*; use crate::client::IpcClientBuilder; @@ -797,7 +799,6 @@ mod tests { types::Request, PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; - use parity_tokio_ipc::dummy_endpoint; use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; @@ -823,7 +824,7 @@ mod tests { // and you might want to do something smarter if it's // critical that "the most recent item" must be sent when it is produced. 
if sink.send(notif).await.is_err() { - break Ok(()) + break Ok(()); } closed = c; @@ -848,6 +849,7 @@ mod tests { #[tokio::test] async fn can_set_the_max_response_body_size() { + // init_test_tracing(); let endpoint = dummy_endpoint(); let server = Builder::default().max_response_body_size(100).build(&endpoint); let mut module = RpcModule::new(()); diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index cd21be271..372617257 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -13,7 +13,7 @@ use jsonrpsee::{ server::{AlreadyStoppedError, RpcModule}, Methods, }; -pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; +pub use reth_ipc::server::Builder as IpcServerBuilder; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; @@ -205,8 +205,7 @@ impl AuthServerConfig { let ipc_endpoint_str = ipc_endpoint .clone() .unwrap_or_else(|| constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string()); - let ipc_path = Endpoint::new(ipc_endpoint_str); - let ipc_server = ipc_server_config.build(ipc_path.path()); + let ipc_server = ipc_server_config.build(ipc_endpoint_str); let res = ipc_server .start(module.inner) .await @@ -449,7 +448,7 @@ impl AuthServerHandle { if let Some(ipc_endpoint) = self.ipc_endpoint.clone() { return Some( IpcClientBuilder::default() - .build(Endpoint::new(ipc_endpoint).path()) + .build(ipc_endpoint) .await .expect("Failed to create ipc client"), ) @@ -463,10 +462,7 @@ impl AuthServerHandle { } /// Return an ipc endpoint - pub fn ipc_endpoint(&self) -> Option { - if let Some(ipc_endpoint) = self.ipc_endpoint.clone() { - return Some(Endpoint::new(ipc_endpoint)) - } - None + pub fn ipc_endpoint(&self) -> Option { + self.ipc_endpoint.clone() } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 4b9159e2d..9c28353c9 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -172,7 +172,7 @@ use 
reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_ipc::server::IpcServer; pub use reth_ipc::server::{ - Builder as IpcServerBuilder, Endpoint, RpcServiceBuilder as IpcRpcServiceBuilder, + Builder as IpcServerBuilder, RpcServiceBuilder as IpcRpcServiceBuilder, }; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_provider::{ @@ -1459,7 +1459,7 @@ where /// /// Once the [RpcModule] is built via [RpcModuleBuilder] the servers can be started, See also /// [ServerBuilder::build] and [Server::start](jsonrpsee::server::Server::start). -#[derive(Default)] +#[derive(Default, Debug)] pub struct RpcServerConfig { /// Configs for JSON-RPC Http. http_server_config: Option>, @@ -1476,26 +1476,11 @@ pub struct RpcServerConfig { /// Configs for JSON-RPC IPC server ipc_server_config: Option>, /// The Endpoint where to launch the ipc server - ipc_endpoint: Option, + ipc_endpoint: Option, /// JWT secret for authentication jwt_secret: Option, } -impl fmt::Debug for RpcServerConfig { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RpcServerConfig") - .field("http_server_config", &self.http_server_config) - .field("http_cors_domains", &self.http_cors_domains) - .field("http_addr", &self.http_addr) - .field("ws_server_config", &self.ws_server_config) - .field("ws_addr", &self.ws_addr) - .field("ipc_server_config", &self.ipc_server_config) - .field("ipc_endpoint", &self.ipc_endpoint.as_ref().map(|endpoint| endpoint.path())) - .field("jwt_secret", &self.jwt_secret) - .finish() - } -} - /// === impl RpcServerConfig === impl RpcServerConfig { @@ -1599,7 +1584,7 @@ impl RpcServerConfig { /// /// Default is [DEFAULT_IPC_ENDPOINT] pub fn with_ipc_endpoint(mut self, path: impl Into) -> Self { - self.ipc_endpoint = Some(Endpoint::new(path.into())); + self.ipc_endpoint = Some(path.into()); self } @@ -1628,9 +1613,9 @@ impl RpcServerConfig { self.ws_addr } - /// Returns the [Endpoint] of the ipc server - pub fn 
ipc_endpoint(&self) -> Option<&Endpoint> { - self.ipc_endpoint.as_ref() + /// Returns the endpoint of the ipc server + pub fn ipc_endpoint(&self) -> Option { + self.ipc_endpoint.clone() } /// Convenience function to do [RpcServerConfig::build] and [RpcServer::start] in one step @@ -1759,12 +1744,10 @@ impl RpcServerConfig { if let Some(builder) = self.ipc_server_config { let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); - let ipc_path = self - .ipc_endpoint - .unwrap_or_else(|| Endpoint::new(DEFAULT_IPC_ENDPOINT.to_string())); + let ipc_path = self.ipc_endpoint.unwrap_or_else(|| DEFAULT_IPC_ENDPOINT.into()); let ipc = builder .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) - .build(ipc_path.path()); + .build(ipc_path); server.ipc = Some(ipc); } @@ -2152,8 +2135,8 @@ impl RpcServer { self.ws_http.ws_local_addr } - /// Returns the [`Endpoint`] of the ipc server if started. - pub fn ipc_endpoint(&self) -> Option<&Endpoint> { + /// Returns the endpoint of the ipc server if started. + pub fn ipc_endpoint(&self) -> Option { self.ipc.as_ref().map(|ipc| ipc.endpoint()) } @@ -2161,7 +2144,7 @@ impl RpcServer { /// /// This returns an [RpcServerHandle] that's connected to the server task(s) until the server is /// stopped or the [RpcServerHandle] is dropped. 
- #[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint().map(|ipc|ipc.path())), target = "rpc", level = "TRACE")] + #[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint()), target = "rpc", level = "TRACE")] pub async fn start(self, modules: TransportRpcModules) -> Result { trace!(target: "rpc", "staring RPC server"); let Self { ws_http, ipc: ipc_server } = self; @@ -2183,7 +2166,7 @@ impl RpcServer { if let Some((server, module)) = ipc_server.and_then(|server| ipc.map(|module| (server, module))) { - handle.ipc_endpoint = Some(server.endpoint().path().to_string()); + handle.ipc_endpoint = Some(server.endpoint()); handle.ipc = Some(server.start(module).await?); } diff --git a/deny.toml b/deny.toml index 347b60965..61cced4fb 100644 --- a/deny.toml +++ b/deny.toml @@ -58,6 +58,7 @@ exceptions = [ { allow = ["CC0-1.0"], name = "secp256k1-sys" }, { allow = ["CC0-1.0"], name = "tiny-keccak" }, { allow = ["CC0-1.0"], name = "more-asserts" }, + { allow = ["CC0-1.0"], name = "to_method" }, { allow = ["CC0-1.0"], name = "aurora-engine-modexp" }, # TODO: decide on MPL-2.0 handling # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980 From 7f0e81e476b910b42997f9cf418ca995e0c3d841 Mon Sep 17 00:00:00 2001 From: Sean Matt Date: Fri, 26 Apr 2024 09:47:12 -0400 Subject: [PATCH 078/250] refactor: remove WsHttpServerKind enum and simplify server launch (#7531) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-builder/src/cors.rs | 2 +- crates/rpc/rpc-builder/src/error.rs | 5 +- crates/rpc/rpc-builder/src/lib.rs | 237 ++++++++++------------------ 3 files changed, 91 insertions(+), 153 deletions(-) diff --git a/crates/rpc/rpc-builder/src/cors.rs b/crates/rpc/rpc-builder/src/cors.rs index 73e755f9f..46ff722ac 100644 --- a/crates/rpc/rpc-builder/src/cors.rs +++ b/crates/rpc/rpc-builder/src/cors.rs 
@@ -3,7 +3,7 @@ use tower_http::cors::{AllowOrigin, Any, CorsLayer}; /// Error thrown when parsing cors domains went wrong #[derive(Debug, thiserror::Error)] -pub(crate) enum CorsDomainError { +pub enum CorsDomainError { #[error("{domain} is an invalid header value")] InvalidHeader { domain: String }, #[error("wildcard origin (`*`) cannot be passed as part of a list: {input}")] diff --git a/crates/rpc/rpc-builder/src/error.rs b/crates/rpc/rpc-builder/src/error.rs index fd59536f7..68a2183fe 100644 --- a/crates/rpc/rpc-builder/src/error.rs +++ b/crates/rpc/rpc-builder/src/error.rs @@ -1,4 +1,4 @@ -use crate::RethRpcModule; +use crate::{cors::CorsDomainError, RethRpcModule}; use reth_ipc::server::IpcServerStartError; use std::{io, io::ErrorKind, net::SocketAddr}; @@ -57,6 +57,9 @@ pub enum RpcError { /// IO error. error: io::Error, }, + /// Cors parsing error. + #[error(transparent)] + Cors(#[from] CorsDomainError), /// Http and WS server configured on the same port but with conflicting settings. 
#[error(transparent)] WsHttpSamePortError(#[from] WsHttpSamePortError), diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 9c28353c9..4bd367060 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -156,8 +156,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use crate::{ - auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics, - RpcModuleSelection::Selection, + auth::AuthRpcModule, cors::CorsDomainError, error::WsHttpSamePortError, + metrics::RpcRequestMetrics, RpcModuleSelection::Selection, }; use constants::*; use error::{RpcError, ServerKind}; @@ -1623,6 +1623,16 @@ impl RpcServerConfig { self.build(&modules).await?.start(modules).await } + /// Creates the [CorsLayer] if any + fn maybe_cors_layer(cors: Option) -> Result, CorsDomainError> { + cors.as_deref().map(cors::create_cors_layer).transpose() + } + + /// Creates the [AuthLayer] if any + fn maybe_jwt_layer(&self) -> Option> { + self.jwt_secret.clone().map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) + } + /// Builds the ws and http server(s). /// /// If both are on the same port, they are combined into one server. 
@@ -1634,7 +1644,6 @@ impl RpcServerConfig { Ipv4Addr::LOCALHOST, DEFAULT_HTTP_RPC_PORT, ))); - let jwt_secret = self.jwt_secret.clone(); let ws_socket_addr = self .ws_addr @@ -1660,33 +1669,39 @@ impl RpcServerConfig { } .cloned(); - let secret = self.jwt_secret.clone(); - // we merge this into one server using the http setup self.ws_server_config.take(); modules.config.ensure_ws_http_identical()?; let builder = self.http_server_config.take().expect("http_server_config is Some"); - let (server, addr) = WsHttpServerKind::build( - builder, - http_socket_addr, - cors, - secret, - ServerKind::WsHttp(http_socket_addr), - modules - .http - .as_ref() - .or(modules.ws.as_ref()) - .map(RpcRequestMetrics::same_port) - .unwrap_or_default(), - ) - .await?; + let server = builder + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(cors)?) + .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new().layer( + modules + .http + .as_ref() + .or(modules.ws.as_ref()) + .map(RpcRequestMetrics::same_port) + .unwrap_or_default(), + ), + ) + .build(http_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; + let addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; return Ok(WsHttpServer { http_local_addr: Some(addr), ws_local_addr: Some(addr), server: WsHttpServers::SamePort(server), - jwt_secret, + jwt_secret: self.jwt_secret.clone(), }) } @@ -1696,32 +1711,48 @@ impl RpcServerConfig { let mut ws_local_addr = None; let mut ws_server = None; if let Some(builder) = self.ws_server_config.take() { - let builder = builder.ws_only(); - let (server, addr) = WsHttpServerKind::build( - builder, - ws_socket_addr, - self.ws_cors_domains.take(), - self.jwt_secret.clone(), - ServerKind::WS(ws_socket_addr), - modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default(), - ) - .await?; + let server = 
builder + .ws_only() + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) + .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new() + .layer(modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default()), + ) + .build(ws_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; + let addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; + ws_local_addr = Some(addr); ws_server = Some(server); } if let Some(builder) = self.http_server_config.take() { - let builder = builder.http_only(); - let (server, addr) = WsHttpServerKind::build( - builder, - http_socket_addr, - self.http_cors_domains.take(), - self.jwt_secret.clone(), - ServerKind::Http(http_socket_addr), - modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), - ) - .await?; - http_local_addr = Some(addr); + let server = builder + .http_only() + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) 
+ .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new().layer( + modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), + ), + ) + .build(http_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::Http(http_socket_addr)))?; + let local_addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::Http(http_socket_addr)))?; + http_local_addr = Some(local_addr); http_server = Some(server); } @@ -1729,7 +1760,7 @@ impl RpcServerConfig { http_local_addr, ws_local_addr, server: WsHttpServers::DifferentPort { http: http_server, ws: ws_server }, - jwt_secret, + jwt_secret: self.jwt_secret.clone(), }) } @@ -1945,6 +1976,15 @@ struct WsHttpServer { jwt_secret: Option, } +// Define the type alias with detailed type complexity +type WsHttpServerKind = Server< + Stack< + tower::util::Either, Identity>, + Stack, Identity>, + >, + Stack, +>; + /// Enum for holding the http and ws servers in all possible combinations. 
enum WsHttpServers { /// Both servers are on the same port @@ -1966,13 +2006,13 @@ impl WsHttpServers { let mut http_handle = None; let mut ws_handle = None; match self { - WsHttpServers::SamePort(both) => { + WsHttpServers::SamePort(server) => { // Make sure http and ws modules are identical, since we currently can't run // different modules on same server config.ensure_ws_http_identical()?; if let Some(module) = http_module.or(ws_module) { - let handle = both.start(module).await; + let handle = server.start(module); http_handle = Some(handle.clone()); ws_handle = Some(handle); } @@ -1981,12 +2021,12 @@ impl WsHttpServers { if let Some((server, module)) = http.and_then(|server| http_module.map(|module| (server, module))) { - http_handle = Some(server.start(module).await); + http_handle = Some(server.start(module)); } if let Some((server, module)) = ws.and_then(|server| ws_module.map(|module| (server, module))) { - ws_handle = Some(server.start(module).await); + ws_handle = Some(server.start(module)); } } } @@ -2001,111 +2041,6 @@ impl Default for WsHttpServers { } } -/// Http Servers Enum -#[allow(clippy::type_complexity)] -enum WsHttpServerKind { - /// Http server - Plain(Server>), - /// Http server with cors - WithCors(Server, Stack>), - /// Http server with auth - WithAuth( - Server, Identity>, Stack>, - ), - /// Http server with cors and auth - WithCorsAuth( - Server< - Stack, Stack>, - Stack, - >, - ), -} - -// === impl WsHttpServerKind === - -impl WsHttpServerKind { - /// Starts the server and returns the handle - async fn start(self, module: RpcModule<()>) -> ServerHandle { - match self { - WsHttpServerKind::Plain(server) => server.start(module), - WsHttpServerKind::WithCors(server) => server.start(module), - WsHttpServerKind::WithAuth(server) => server.start(module), - WsHttpServerKind::WithCorsAuth(server) => server.start(module), - } - } - - /// Builds the server according to the given config parameters. 
- /// - /// Returns the address of the started server. - async fn build( - builder: ServerBuilder, - socket_addr: SocketAddr, - cors_domains: Option, - jwt_secret: Option, - server_kind: ServerKind, - metrics: RpcRequestMetrics, - ) -> Result<(Self, SocketAddr), RpcError> { - if let Some(cors) = cors_domains.as_deref().map(cors::create_cors_layer) { - let cors = cors.map_err(|err| RpcError::Custom(err.to_string()))?; - - if let Some(secret) = jwt_secret { - // stack cors and auth layers - let middleware = tower::ServiceBuilder::new() - .layer(cors) - .layer(AuthLayer::new(JwtAuthValidator::new(secret.clone()))); - - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::WithCorsAuth(server); - Ok((server, local_addr)) - } else { - let middleware = tower::ServiceBuilder::new().layer(cors); - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::WithCors(server); - Ok((server, local_addr)) - } - } else if let Some(secret) = jwt_secret { - // jwt auth layered service - let middleware = tower::ServiceBuilder::new() - .layer(AuthLayer::new(JwtAuthValidator::new(secret.clone()))); - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; 
- let server = WsHttpServerKind::WithAuth(server); - Ok((server, local_addr)) - } else { - // plain server without any middleware - let server = builder - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::Plain(server); - Ok((server, local_addr)) - } - } -} - /// Container type for each transport ie. http, ws, and ipc server pub struct RpcServer { /// Configured ws,http servers From 953ba043adca08c35b21c6383a690d9850944ca4 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 26 Apr 2024 17:04:06 +0200 Subject: [PATCH 079/250] chore: bump alloy 4e22b9e (#7895) --- Cargo.lock | 71 +++++++++++++------ Cargo.toml | 26 +++---- crates/e2e-test-utils/src/transaction.rs | 2 +- crates/primitives/src/withdrawal.rs | 8 +-- crates/rpc/rpc-api/src/eth.rs | 5 +- crates/rpc/rpc-builder/tests/it/http.rs | 2 +- .../rpc-types-compat/src/transaction/mod.rs | 2 +- crates/rpc/rpc/src/eth/api/call.rs | 6 +- crates/rpc/rpc/src/eth/api/server.rs | 17 ++--- crates/rpc/rpc/src/eth/api/transactions.rs | 20 ++---- crates/rpc/rpc/src/eth/revm_utils.rs | 8 ++- 11 files changed, 94 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a68ee773..c046501c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,7 +133,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,7 +166,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = 
"git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -185,11 +185,12 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "alloy-serde", "serde", + "serde_json", ] [[package]] @@ -207,7 +208,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "serde", @@ -219,7 +220,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-consensus", "alloy-eips", @@ -227,6 +228,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types", "alloy-signer", + "alloy-sol-types", "async-trait", "futures-utils-wasm", "thiserror", @@ -235,7 +237,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -277,7 +279,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = 
"git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -327,7 +329,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -347,7 +349,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-consensus", "alloy-eips", @@ -369,7 +371,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "alloy-serde", @@ -379,7 +381,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-consensus", "alloy-eips", @@ -390,6 +392,8 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", + "jsonwebtoken 9.3.0", + "rand 0.8.5", "serde", "thiserror", ] @@ -397,7 +401,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = 
"git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -409,7 +413,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "serde", @@ -419,7 +423,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-primitives", "async-trait", @@ -432,7 +436,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-consensus", "alloy-network", @@ -507,7 +511,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -525,7 +529,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -3199,8 +3203,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -4341,13 +4347,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ "base64 0.21.7", - "pem", + "pem 1.1.1", "ring 0.16.20", "serde", "serde_json", "simple_asn1", ] +[[package]] +name = "jsonwebtoken" +version = "9.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" +dependencies = [ + "base64 0.21.7", + "js-sys", + "pem 3.0.4", + "ring 0.17.8", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "k256" version = "0.13.3" @@ -5413,6 +5434,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.0", + "serde", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -7459,7 +7490,7 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", - "jsonwebtoken", + "jsonwebtoken 8.3.0", "metrics", "parking_lot 0.12.2", "pin-project", @@ -7849,7 +7880,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=dc614ee#dc614eec85ee4d4af938865b121fad58ec7dad5f" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=848d568#848d5688d0c499c538b9a78b423a7061525aa580" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index bd4fd5fd9..b62467405 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -277,7 +277,7 @@ reth-optimism-consensus = { path = "crates/optimism/consensus" } # revm revm = { version = "8.0.0", features = 
["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "dc614ee" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "848d568" } # eth alloy-chains = "0.1.15" @@ -286,20 +286,20 @@ alloy-dyn-abi = "0.7.1" alloy-sol-types = "0.7.1" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "39b8695" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev 
= "39b8695" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "4e22b9e" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } # misc auto_impl = "1" diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index a2c40052c..ea066304b 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -69,7 +69,7 @@ fn tx(chain_id: u64, data: Option, nonce: u64) -> TransactionRequest { TransactionRequest { nonce: Some(nonce), value: Some(U256::from(100)), - to: Some(Address::random()), + to: Some(reth_primitives::TxKind::Call(Address::random())), gas: Some(210000), max_fee_per_gas: Some(20e9 as u128), max_priority_fee_per_gas: Some(20e9 as u128), diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index 730fb291c..a348b6a05 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -1,4 +1,4 @@ -use crate::{constants::GWEI_TO_WEI, serde_helper::u64_hex, Address}; +use crate::{constants::GWEI_TO_WEI, serde_helper::u64_via_ruint, Address}; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; use reth_codecs::{main_codec, Compact}; use std::{ @@ -11,15 +11,15 @@ use std::{ #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] pub struct Withdrawal { /// Monotonically increasing identifier issued by consensus layer. 
- #[serde(with = "u64_hex")] + #[serde(with = "u64_via_ruint")] pub index: u64, /// Index of validator associated with withdrawal. - #[serde(with = "u64_hex", rename = "validatorIndex")] + #[serde(with = "u64_via_ruint", rename = "validatorIndex")] pub validator_index: u64, /// Target address for withdrawn ether. pub address: Address, /// Value of the withdrawal in gwei. - #[serde(with = "u64_hex")] + #[serde(with = "u64_via_ruint")] pub amount: u64, } diff --git a/crates/rpc/rpc-api/src/eth.rs b/crates/rpc/rpc-api/src/eth.rs index c878a7e1c..8811ef87d 100644 --- a/crates/rpc/rpc-api/src/eth.rs +++ b/crates/rpc/rpc-api/src/eth.rs @@ -1,7 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{ - serde_helper::{num::U64HexOrNumber, JsonStorageKey}, - Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, + serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, }; use reth_rpc_types::{ state::StateOverride, AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, @@ -247,7 +246,7 @@ pub trait EthApi { #[method(name = "feeHistory")] async fn fee_history( &self, - block_count: U64HexOrNumber, + block_count: u64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, ) -> RpcResult; diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 7fc714a2d..42fecb87d 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -167,7 +167,7 @@ where EthApiClient::block_number(client).await.unwrap(); EthApiClient::get_code(client, address, None).await.unwrap(); EthApiClient::send_raw_transaction(client, tx).await.unwrap(); - EthApiClient::fee_history(client, 0.into(), block_number, None).await.unwrap(); + EthApiClient::fee_history(client, 0, block_number, None).await.unwrap(); EthApiClient::balance(client, address, None).await.unwrap(); EthApiClient::transaction_count(client, address, 
None).await.unwrap(); EthApiClient::storage_at(client, address, U256::default().into(), None).await.unwrap(); diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 1004e93e2..d0f4672a2 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -181,7 +181,7 @@ pub fn from_primitive_access_list( /// Convert [TransactionSignedEcRecovered] to [TransactionRequest] pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { let from = tx.signer(); - let to = tx.transaction.to(); + let to = Some(tx.transaction.to().into()); let gas = tx.transaction.gas_limit(); let value = tx.transaction.value(); let input = tx.transaction.input().clone(); diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 62be2612c..d556249c3 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -14,7 +14,9 @@ use crate::{ }; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, U256}; +use reth_primitives::{ + revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, TxKind, U256, +}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; @@ -411,7 +413,7 @@ where } let from = request.from.unwrap_or_default(); - let to = if let Some(to) = request.to { + let to = if let Some(TxKind::Call(to)) = request.to { to } else { let nonce = db.basic_ref(from)?.unwrap_or_default().nonce; diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 6be1a88af..2648df08f 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -8,8 +8,7 @@ use tracing::trace; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use 
reth_primitives::{ - serde_helper::{num::U64HexOrNumber, JsonStorageKey}, - Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, + serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, }; use reth_provider::{ BlockIdReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, @@ -353,14 +352,12 @@ where /// Handler for: `eth_feeHistory` async fn fee_history( &self, - block_count: U64HexOrNumber, + block_count: u64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, ) -> Result { trace!(target: "rpc::eth", ?block_count, ?newest_block, ?reward_percentiles, "Serving eth_feeHistory"); - return Ok( - EthApi::fee_history(self, block_count.to(), newest_block, reward_percentiles).await? - ) + return Ok(EthApi::fee_history(self, block_count, newest_block, reward_percentiles).await?) } /// Handler for: `eth_mining` @@ -585,7 +582,7 @@ mod tests { async fn test_fee_history_empty() { let response = as EthApiServer>::fee_history( &build_test_eth_api(NoopProvider::default()), - 1.into(), + 1, BlockNumberOrTag::Latest, None, ) @@ -607,7 +604,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - (newest_block + 1).into(), + newest_block + 1, newest_block.into(), Some(vec![10.0]), ) @@ -630,7 +627,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - 1.into(), + 1, (newest_block + 1000).into(), Some(vec![10.0]), ) @@ -653,7 +650,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - 0.into(), + 0, newest_block.into(), None, ) diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 1ca8ed119..3e582821b 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -919,10 +919,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => 
RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: None, })) } @@ -935,10 +932,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: 0, access_list, })) @@ -958,10 +952,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: 0, access_list: access_list.unwrap_or_default(), })) @@ -987,10 +978,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), access_list: access_list.unwrap_or_default(), // eip-4844 specific. 
diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 4b00d4662..c80aee99d 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -7,7 +7,7 @@ use reth_primitives::revm::env::fill_op_tx_env; use reth_primitives::revm::env::fill_tx_env; use reth_primitives::{ revm::env::fill_tx_env_with_recovered, Address, TransactionSigned, - TransactionSignedEcRecovered, TxHash, B256, U256, + TransactionSignedEcRecovered, TxHash, TxKind, B256, U256, }; use reth_rpc_types::{ state::{AccountOverride, StateOverride}, @@ -250,13 +250,17 @@ pub(crate) fn create_txn_env( )?; let gas_limit = gas.unwrap_or_else(|| block_env.gas_limit.min(U256::from(u64::MAX)).to()); + let transact_to = match to { + Some(TxKind::Call(to)) => TransactTo::call(to), + _ => TransactTo::create(), + }; let env = TxEnv { gas_limit: gas_limit.try_into().map_err(|_| RpcInvalidTransactionError::GasUintOverflow)?, nonce, caller: from.unwrap_or_default(), gas_price, gas_priority_fee: max_priority_fee_per_gas, - transact_to: to.map(TransactTo::Call).unwrap_or_else(TransactTo::create), + transact_to, value: value.unwrap_or_default(), data: input.try_into_unique_input()?.unwrap_or_default(), chain_id, From 51bdc6afe8aea36726ba173e1741255475d1b1be Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 26 Apr 2024 16:14:35 +0100 Subject: [PATCH 080/250] fix(exex): skipping logic of the notifications (#7919) --- crates/exex/src/manager.rs | 59 ++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 1c9eaf9ef..81e523718 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -84,31 +84,53 @@ impl ExExHandle { fn send( &mut self, cx: &mut Context<'_>, - (event_id, notification): &(usize, ExExNotification), + (notification_id, notification): &(usize, ExExNotification), ) -> Poll>> { - // check that this notification 
is above the finished height of the exex if the exex has set - // one if let Some(finished_height) = self.finished_height { match notification { - ExExNotification::ChainCommitted { new } | - ExExNotification::ChainReorged { old: _, new } - if finished_height >= new.tip().number => - { - self.next_notification_id = event_id + 1; - return Poll::Ready(Ok(())) + ExExNotification::ChainCommitted { new } => { + // Skip the chain commit notification if the finished height of the ExEx is + // higher than or equal to the tip of the new notification. + // I.e., the ExEx has already processed the notification. + if finished_height >= new.tip().number { + debug!( + exex_id = %self.id, + %notification_id, + %finished_height, + new_tip = %new.tip().number, + "Skipping notification" + ); + + self.next_notification_id = notification_id + 1; + return Poll::Ready(Ok(())) + } } - _ => (), + // Do not handle [ExExNotification::ChainReorged] and + // [ExExNotification::ChainReverted] cases and always send the + // notification, because the ExEx should be aware of the reorgs and reverts lower + // than its finished height + ExExNotification::ChainReorged { .. } | ExExNotification::ChainReverted { .. 
} => {} } } + debug!( + exex_id = %self.id, + %notification_id, + "Reserving slot for notification" + ); match self.sender.poll_reserve(cx) { Poll::Ready(Ok(())) => (), other => return other, } + debug!( + exex_id = %self.id, + %notification_id, + "Sending notification" + ); match self.sender.send_item(notification.clone()) { Ok(()) => { - self.next_notification_id = event_id + 1; + self.next_notification_id = notification_id + 1; self.metrics.notifications_sent_total.increment(1); Poll::Ready(Ok(())) } @@ -263,7 +285,11 @@ impl Future for ExExManager { // drain handle notifications while self.buffer.len() < self.max_capacity { if let Poll::Ready(Some(notification)) = self.handle_rx.poll_recv(cx) { - debug!("received new notification"); + debug!( + committed_tip = ?notification.committed_chain().map(|chain| chain.tip().number), + reverted_tip = ?notification.reverted_chain().map(|chain| chain.tip().number), + "Received new notification" + ); self.push_notification(notification); continue } @@ -285,11 +311,6 @@ impl Future for ExExManager { .checked_sub(self.min_id) .expect("exex expected notification ID outside the manager's range"); if let Some(notification) = self.buffer.get(notification_index) { - debug!( - exex.id, - notification_id = exex.next_notification_id, - "sent notification to exex" - ); if let Poll::Ready(Err(err)) = exex.send(cx, notification) { // the channel was closed, which is irrecoverable for the manager return Poll::Ready(Err(err.into())) @@ -300,9 +321,9 @@ impl Future for ExExManager { } // remove processed buffered notifications + debug!(%min_id, "Updating lowest notification id in buffer"); self.buffer.retain(|&(id, _)| id >= min_id); self.min_id = min_id; - debug!(min_id, "lowest notification id in buffer updated"); // update capacity self.update_capacity(); @@ -310,7 +331,7 @@ impl Future for ExExManager { // handle incoming exex events for exex in self.exex_handles.iter_mut() { while let Poll::Ready(Some(event)) = 
exex.receiver.poll_recv(cx) { - debug!(?event, id = exex.id, "received event from exex"); + debug!(exex_id = exex.id, ?event, "Received event from exex"); exex.metrics.events_sent_total.increment(1); match event { ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height), From 704b3e3ac496d7ae72a964e269bc1ee1bb7a809e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 26 Apr 2024 18:39:35 +0200 Subject: [PATCH 081/250] chore(sync): add block number to body validation error (#7918) Co-authored-by: Oliver Nordbjerg Co-authored-by: Matthias Seitz --- crates/interfaces/src/p2p/error.rs | 6 ++++-- crates/net/downloaders/src/bodies/request.rs | 7 ++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 6d822f44c..f63f8879a 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -158,10 +158,12 @@ pub enum DownloadError { /* ==================== BODIES ERRORS ==================== */ /// Block validation failed - #[error("failed to validate body for header {hash}: {error}")] + #[error("failed to validate body for header {hash}, block number {number}: {error}")] BodyValidation { - /// Hash of header failing validation + /// Hash of the block failing validation hash: B256, + /// Number of the block failing validation + number: u64, /// The details of validation failure #[source] error: Box, diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index d6da2444c..032fb3ebc 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -184,8 +184,13 @@ where if let Err(error) = self.consensus.validate_block(&block) { // Body is invalid, put the header back and return an error let hash = block.hash(); + let number = block.number; self.pending_headers.push_front(block.header); - return Err(DownloadError::BodyValidation { hash, error: 
Box::new(error) }) + return Err(DownloadError::BodyValidation { + hash, + number, + error: Box::new(error), + }) } self.buffer.push(BlockResponse::Full(block)); From ffa36b7348b2e5f4bdb09804961eda597131b91d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 26 Apr 2024 20:46:44 +0200 Subject: [PATCH 082/250] use default implementation for `BlockId` (#7917) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/debug.rs | 4 ++-- crates/rpc/rpc/src/eth/api/call.rs | 17 +++++------------ crates/rpc/rpc/src/eth/api/server.rs | 8 +------- crates/rpc/rpc/src/eth/api/state.rs | 2 +- crates/rpc/rpc/src/trace.rs | 8 ++------ 5 files changed, 11 insertions(+), 28 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b212d1636..e47ccc466 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -267,7 +267,7 @@ where block_id: Option, opts: GethDebugTracingCallOptions, ) -> EthResult { - let at = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let at = block_id.unwrap_or_default(); let GethDebugTracingCallOptions { tracing_options, state_overrides, block_overrides } = opts; let overrides = EvmOverrides::new(state_overrides, block_overrides.map(Box::new)); @@ -420,7 +420,7 @@ where let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); let transaction_index = transaction_index.unwrap_or_default(); - let target_block = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let target_block = block_number.unwrap_or_default(); let ((cfg, mut block_env, _), block) = futures::try_join!( self.inner.eth_api.evm_env_at(target_block), self.inner.eth_api.block_by_id_with_senders(target_block), diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index d556249c3..7066f7372 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -14,9 +14,7 @@ use 
crate::{ }; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{ - revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, TxKind, U256, -}; +use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, TxKind, U256}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; @@ -73,13 +71,8 @@ where block_number: Option, overrides: EvmOverrides, ) -> EthResult { - let (res, _env) = self - .transact_call_at( - request, - block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)), - overrides, - ) - .await?; + let (res, _env) = + self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; ensure_success(res.result) } @@ -100,7 +93,7 @@ where let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); let transaction_index = transaction_index.unwrap_or_default(); - let target_block = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let target_block = block_number.unwrap_or_default(); let is_block_target_pending = target_block.is_pending(); let ((cfg, block_env, _), block) = futures::try_join!( @@ -390,7 +383,7 @@ where mut request: TransactionRequest, at: Option, ) -> EthResult { - let block_id = at.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let block_id = at.unwrap_or_default(); let (cfg, block, at) = self.evm_env_at(block_id).await?; let state = self.state_at(at)?; diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 2648df08f..c2be79a10 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -314,13 +314,7 @@ where state_override: Option, ) -> Result { trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); - Ok(self - .estimate_gas_at( - request, - block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)), - state_override, - ) - .await?) 
+ Ok(self.estimate_gas_at(request, block_number.unwrap_or_default(), state_override).await?) } /// Handler for: `eth_gasPrice` diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index f739c4a75..7f0bdd4e2 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -84,7 +84,7 @@ where block_id: Option, ) -> EthResult { let chain_info = self.provider().chain_info()?; - let block_id = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let block_id = block_id.unwrap_or_default(); // if we are trying to create a proof for the latest block, but have a BlockId as input // that is not BlockNumberOrTag::Latest, then we need to figure out whether or not the diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index ade8291c3..047919036 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -78,7 +78,7 @@ where { /// Executes the given call and returns a number of possible traces for it. 
pub async fn trace_call(&self, trace_request: TraceCallRequest) -> EthResult { - let at = trace_request.block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let at = trace_request.block_id.unwrap_or_default(); let config = TracingInspectorConfig::from_parity_config(&trace_request.trace_types); let overrides = EvmOverrides::new(trace_request.state_overrides, trace_request.block_overrides); @@ -106,11 +106,7 @@ where ) -> EthResult { let tx = recover_raw_transaction(tx)?; - let (cfg, block, at) = self - .inner - .eth_api - .evm_env_at(block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest))) - .await?; + let (cfg, block, at) = self.inner.eth_api.evm_env_at(block_id.unwrap_or_default()).await?; let tx = tx_env_with_recovered(&tx.into_ecrecovered_transaction()); let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx); From 2f052a81120507dc90de4352cb1dca38de86836a Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Sat, 27 Apr 2024 11:47:34 +0800 Subject: [PATCH 083/250] feat: replace low level IPC with interprocess (#7922) --- Cargo.lock | 1 - crates/rpc/ipc/Cargo.toml | 3 - crates/rpc/ipc/src/client/mod.rs | 93 +++++++++++++++++++++++++------ crates/rpc/ipc/src/client/unix.rs | 82 --------------------------- crates/rpc/ipc/src/client/win.rs | 82 --------------------------- 5 files changed, 77 insertions(+), 184 deletions(-) delete mode 100644 crates/rpc/ipc/src/client/unix.rs delete mode 100644 crates/rpc/ipc/src/client/win.rs diff --git a/Cargo.lock b/Cargo.lock index c046501c6..c66304e91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6915,7 +6915,6 @@ dependencies = [ "tokio-util", "tower", "tracing", - "windows-sys 0.52.0", ] [[package]] diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 094fa5759..af6e64db1 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -31,9 +31,6 @@ thiserror.workspace = true futures-util = "0.3.30" interprocess = { version = "1.2.1", features = ["tokio_support"] } 
-[target.'cfg(windows)'.dependencies] -windows-sys = { version = "0.52.0", features = ["Win32_Foundation"] } - [dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } reth-tracing.workspace = true diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs index 8ca4b5406..05ea7ed58 100644 --- a/crates/rpc/ipc/src/client/mod.rs +++ b/crates/rpc/ipc/src/client/mod.rs @@ -1,24 +1,85 @@ //! [`jsonrpsee`] transport adapter implementation for IPC. -use std::{ - io, - path::{Path, PathBuf}, -}; - +use crate::stream_codec::StreamCodec; +use futures::StreamExt; +use interprocess::local_socket::tokio::{LocalSocketStream, OwnedReadHalf, OwnedWriteHalf}; use jsonrpsee::{ async_client::{Client, ClientBuilder}, - core::client::{TransportReceiverT, TransportSenderT}, + core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}, +}; +use std::io; +use tokio::io::AsyncWriteExt; +use tokio_util::{ + codec::FramedRead, + compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt}, }; -#[cfg(unix)] -use crate::client::unix::IpcTransportClientBuilder; -#[cfg(windows)] -use crate::client::win::IpcTransportClientBuilder; +/// Sending end of IPC transport. +#[derive(Debug)] +pub(crate) struct Sender { + inner: Compat, +} + +#[async_trait::async_trait] +impl TransportSenderT for Sender { + type Error = IpcError; + + /// Sends out a request. Returns a Future that finishes when the request has been successfully + /// sent. + async fn send(&mut self, msg: String) -> Result<(), Self::Error> { + Ok(self.inner.write_all(msg.as_bytes()).await?) + } + + async fn send_ping(&mut self) -> Result<(), Self::Error> { + tracing::trace!("send ping - not implemented"); + Err(IpcError::NotSupported) + } + + /// Close the connection. + async fn close(&mut self) -> Result<(), Self::Error> { + Ok(()) + } +} + +/// Receiving end of IPC transport. 
+#[derive(Debug)] +pub(crate) struct Receiver { + pub(crate) inner: FramedRead, StreamCodec>, +} + +#[async_trait::async_trait] +impl TransportReceiverT for Receiver { + type Error = IpcError; + + /// Returns a Future resolving when the server sent us something back. + async fn receive(&mut self) -> Result { + self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) + } +} + +/// Builder for IPC transport [`Sender`] and [`Receiver`] pair. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub(crate) struct IpcTransportClientBuilder; + +impl IpcTransportClientBuilder { + pub(crate) async fn build( + self, + endpoint: impl AsRef, + ) -> Result<(Sender, Receiver), IpcError> { + let endpoint = endpoint.as_ref().to_string(); + let conn = LocalSocketStream::connect(endpoint.clone()) + .await + .map_err(|err| IpcError::FailedToConnect { path: endpoint, err })?; -#[cfg(unix)] -mod unix; -#[cfg(windows)] -mod win; + let (rhlf, whlf) = conn.into_split(); + + Ok(( + Sender { inner: whlf.compat_write() }, + Receiver { inner: FramedRead::new(rhlf.compat(), StreamCodec::stream_incoming()) }, + )) + } +} /// Builder type for [`Client`] #[derive(Clone, Default, Debug)] @@ -37,7 +98,7 @@ impl IpcClientBuilder { /// # Ok(()) /// # } /// ``` - pub async fn build(self, path: impl AsRef) -> Result { + pub async fn build(self, path: impl AsRef) -> Result { let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; Ok(self.build_with_tokio(tx, rx)) } @@ -66,7 +127,7 @@ pub enum IpcError { FailedToConnect { /// The path of the socket. #[doc(hidden)] - path: PathBuf, + path: String, /// The error occurred while connecting. #[doc(hidden)] err: io::Error, diff --git a/crates/rpc/ipc/src/client/unix.rs b/crates/rpc/ipc/src/client/unix.rs deleted file mode 100644 index c7ed7bc7a..000000000 --- a/crates/rpc/ipc/src/client/unix.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! 
[`jsonrpsee`] transport adapter implementation for Unix IPC by using Unix Sockets. - -use crate::{client::IpcError, stream_codec::StreamCodec}; -use futures::StreamExt; -use jsonrpsee::core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}; -use std::path::Path; -use tokio::{ - io::AsyncWriteExt, - net::{ - unix::{OwnedReadHalf, OwnedWriteHalf}, - UnixStream, - }, -}; -use tokio_util::codec::FramedRead; - -/// Sending end of IPC transport. -#[derive(Debug)] -pub(crate) struct Sender { - inner: OwnedWriteHalf, -} - -#[async_trait::async_trait] -impl TransportSenderT for Sender { - type Error = IpcError; - - /// Sends out a request. Returns a Future that finishes when the request has been successfully - /// sent. - async fn send(&mut self, msg: String) -> Result<(), Self::Error> { - Ok(self.inner.write_all(msg.as_bytes()).await?) - } - - async fn send_ping(&mut self) -> Result<(), Self::Error> { - tracing::trace!("send ping - not implemented"); - Err(IpcError::NotSupported) - } - - /// Close the connection. - async fn close(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Receiving end of IPC transport. -#[derive(Debug)] -pub(crate) struct Receiver { - pub(crate) inner: FramedRead, -} - -#[async_trait::async_trait] -impl TransportReceiverT for Receiver { - type Error = IpcError; - - /// Returns a Future resolving when the server sent us something back. - async fn receive(&mut self) -> Result { - self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) - } -} - -/// Builder for IPC transport [`Sender`] and [`Receiver`] pair. 
-#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub(crate) struct IpcTransportClientBuilder; - -impl IpcTransportClientBuilder { - pub(crate) async fn build( - self, - path: impl AsRef, - ) -> Result<(Sender, Receiver), IpcError> { - let path = path.as_ref(); - - let stream = UnixStream::connect(path) - .await - .map_err(|err| IpcError::FailedToConnect { path: path.to_path_buf(), err })?; - - let (rhlf, whlf) = stream.into_split(); - - Ok(( - Sender { inner: whlf }, - Receiver { inner: FramedRead::new(rhlf, StreamCodec::stream_incoming()) }, - )) - } -} diff --git a/crates/rpc/ipc/src/client/win.rs b/crates/rpc/ipc/src/client/win.rs deleted file mode 100644 index 69b3140fe..000000000 --- a/crates/rpc/ipc/src/client/win.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! [`jsonrpsee`] transport adapter implementation for Windows IPC by using NamedPipes. - -use crate::{client::IpcError, stream_codec::StreamCodec}; -use jsonrpsee::core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}; -use std::{path::Path, sync::Arc}; -use tokio::{ - io::AsyncWriteExt, - net::windows::named_pipe::{ClientOptions, NamedPipeClient}, - time, - time::Duration, -}; -use tokio_stream::StreamExt; -use tokio_util::codec::FramedRead; -use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; - -/// Sending end of IPC transport. -#[derive(Debug)] -pub struct Sender { - inner: Arc, -} - -#[async_trait::async_trait] -impl TransportSenderT for Sender { - type Error = IpcError; - - /// Sends out a request. Returns a Future that finishes when the request has been successfully - /// sent. - async fn send(&mut self, msg: String) -> Result<(), Self::Error> { - Ok(self.inner.write_all(msg.as_bytes()).await?) - } - - async fn send_ping(&mut self) -> Result<(), Self::Error> { - tracing::trace!("send ping - not implemented"); - Err(IpcError::NotSupported) - } - - /// Close the connection. - async fn close(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Receiving end of IPC transport. 
-#[derive(Debug)] -pub struct Receiver { - inner: FramedRead, StreamCodec>, -} - -#[async_trait::async_trait] -impl TransportReceiverT for Receiver { - type Error = IpcError; - - /// Returns a Future resolving when the server sent us something back. - async fn receive(&mut self) -> Result { - self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?))) - } -} - -/// Builder for IPC transport [`crate::client::win::Sender`] and [`crate::client::win::Receiver`] -/// pair. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct IpcTransportClientBuilder; - -impl IpcTransportClientBuilder { - pub async fn build(self, path: impl AsRef) -> Result<(Sender, Receiver), IpcError> { - let addr = path.as_ref().as_os_str(); - let client = loop { - match ClientOptions::new().open(addr) { - Ok(client) => break client, - Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (), - Err(e) => return IpcError::FailedToConnect { path: path.to_path_buf(), err: e }, - } - time::sleep(Duration::from_mills(50)).await; - }; - let client = Arc::new(client); - Ok(( - Sender { inner: client.clone() }, - Receiver { inner: FramedRead::new(client, StreamCodec::stream_incoming()) }, - )) - } -} From 6bdba8a2a6c22c2fc8762c8075cd7ebd5ec1bb5d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 06:25:09 +0200 Subject: [PATCH 084/250] chore: misc clippy fixes (#7926) --- crates/optimism/consensus/src/lib.rs | 1 - crates/optimism/evm/src/execute.rs | 7 +++---- crates/rpc/ipc/src/server/mod.rs | 2 +- crates/stages/src/stages/headers.rs | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 9a905adfa..4deea2879 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -5,7 +5,6 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = 
"https://github.com/paradigmxyz/reth/issues/" )] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 5b7d797da..c56c7622e 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -541,12 +541,11 @@ mod tests { b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, }; - use reth_revm::{database::StateProviderDatabase, L1_BLOCK_CONTRACT}; + use reth_revm::{ + database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, + }; use std::{collections::HashMap, str::FromStr}; - use crate::OptimismEvmConfig; - use reth_revm::test_utils::StateProviderTest; - fn create_op_state_provider() -> StateProviderTest { let mut db = StateProviderTest::default(); diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index c876457e1..7239249e1 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -788,7 +788,7 @@ pub fn dummy_endpoint() -> String { mod tests { use super::*; use crate::client::IpcClientBuilder; - use futures::future::{select, Either}; + use futures::future::select; use jsonrpsee::{ core::{ client, diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index fd1484128..548048dd7 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -387,7 +387,7 @@ mod tests { use reth_primitives::{ stage::StageUnitCheckpoint, BlockBody, SealedBlock, SealedBlockWithSenders, B256, }; - use reth_provider::{BlockHashReader, BlockWriter, BundleStateWithReceipts, ProviderFactory}; + use reth_provider::{BlockWriter, BundleStateWithReceipts, ProviderFactory}; use 
reth_trie::{updates::TrieUpdates, HashedPostState}; use test_runner::HeadersTestRunner; From cc4a418ddf73ae5af240b403e4b6e1d191130cee Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Sat, 27 Apr 2024 06:18:59 +0200 Subject: [PATCH 085/250] refactor: extract peer types to net/types (#7912) Co-authored-by: Matthias Seitz --- Cargo.lock | 30 ++- Cargo.toml | 4 +- crates/consensus/auto-seal/Cargo.toml | 1 + crates/consensus/auto-seal/src/client.rs | 5 +- crates/interfaces/Cargo.toml | 1 + crates/interfaces/src/p2p/download.rs | 2 +- crates/interfaces/src/p2p/either.rs | 2 +- crates/interfaces/src/p2p/error.rs | 5 +- crates/interfaces/src/p2p/full_block.rs | 3 +- crates/interfaces/src/test_utils/bodies.rs | 2 +- .../interfaces/src/test_utils/full_block.rs | 5 +- crates/interfaces/src/test_utils/headers.rs | 3 +- crates/net/common/Cargo.toml | 2 +- crates/net/common/src/ban_list.rs | 2 +- crates/net/discv4/Cargo.toml | 1 + crates/net/discv4/src/lib.rs | 6 +- crates/net/discv4/src/node.rs | 3 +- crates/net/discv4/src/proto.rs | 5 +- crates/net/discv4/src/table.rs | 2 +- crates/net/discv4/src/test_utils.rs | 3 +- crates/net/discv5/Cargo.toml | 1 + crates/net/discv5/src/enr.rs | 2 +- crates/net/discv5/src/lib.rs | 3 +- crates/net/dns/Cargo.toml | 1 + crates/net/dns/src/lib.rs | 3 +- crates/net/downloaders/Cargo.toml | 1 + crates/net/downloaders/src/bodies/request.rs | 5 +- crates/net/downloaders/src/file_client.rs | 3 +- .../src/headers/reverse_headers.rs | 4 +- .../src/test_utils/bodies_client.rs | 3 +- crates/net/ecies/Cargo.toml | 1 + crates/net/ecies/src/algorithm.rs | 3 +- crates/net/ecies/src/stream.rs | 2 +- crates/net/eth-wire/Cargo.toml | 1 + crates/net/eth-wire/src/ethstream.rs | 3 +- crates/net/eth-wire/src/hello.rs | 9 +- crates/net/eth-wire/src/muxdemux.rs | 3 +- crates/net/eth-wire/src/test_utils.rs | 3 +- crates/net/network-api/Cargo.toml | 1 + crates/net/network-api/src/lib.rs | 3 +- 
crates/net/network-api/src/noop.rs | 3 +- crates/net/network/Cargo.toml | 1 + crates/net/network/src/config.rs | 4 +- crates/net/network/src/discovery.rs | 3 +- crates/net/network/src/eth_requests.rs | 3 +- crates/net/network/src/fetch/client.rs | 3 +- crates/net/network/src/fetch/mod.rs | 3 +- crates/net/network/src/import.rs | 2 +- crates/net/network/src/manager.rs | 3 +- crates/net/network/src/message.rs | 3 +- crates/net/network/src/network.rs | 3 +- crates/net/network/src/peers/manager.rs | 6 +- crates/net/network/src/session/active.rs | 5 +- crates/net/network/src/session/handle.rs | 2 +- crates/net/network/src/session/mod.rs | 3 +- crates/net/network/src/state.rs | 6 +- crates/net/network/src/swarm.rs | 2 +- crates/net/network/src/test_utils/init.rs | 2 +- crates/net/network/src/test_utils/testnet.rs | 3 +- .../net/network/src/transactions/fetcher.rs | 3 +- crates/net/network/src/transactions/mod.rs | 4 +- crates/net/types/Cargo.toml | 27 ++ crates/net/types/src/lib.rs | 239 ++++++++++++++++++ crates/primitives/Cargo.toml | 2 - crates/primitives/src/lib.rs | 2 - crates/rpc/rpc-api/Cargo.toml | 1 + crates/rpc/rpc-api/src/admin.rs | 3 +- crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/admin.rs | 3 +- crates/stages/Cargo.toml | 1 + crates/stages/src/lib.rs | 3 +- crates/transaction-pool/Cargo.toml | 1 + crates/transaction-pool/src/traits.rs | 8 +- examples/manual-p2p/Cargo.toml | 11 +- examples/manual-p2p/src/main.rs | 3 +- 75 files changed, 428 insertions(+), 86 deletions(-) create mode 100644 crates/net/types/Cargo.toml create mode 100644 crates/net/types/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index c66304e91..a40fb4513 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4741,6 +4741,7 @@ dependencies = [ "reth-ecies", "reth-eth-wire", "reth-network", + "reth-network-types", "reth-primitives", "secp256k1", "tokio", @@ -6323,6 +6324,7 @@ dependencies = [ "reth-engine-primitives", "reth-evm", "reth-interfaces", + "reth-network-types", "reth-primitives", 
"reth-provider", "reth-revm", @@ -6553,6 +6555,7 @@ dependencies = [ "rand 0.8.5", "reth-net-common", "reth-net-nat", + "reth-network-types", "reth-primitives", "reth-tracing", "secp256k1", @@ -6578,6 +6581,7 @@ dependencies = [ "multiaddr", "rand 0.8.5", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-tracing", "secp256k1", @@ -6597,6 +6601,7 @@ dependencies = [ "parking_lot 0.12.2", "rand 0.8.5", "reth-net-common", + "reth-network-types", "reth-primitives", "reth-tracing", "schnellru", @@ -6628,6 +6633,7 @@ dependencies = [ "reth-db", "reth-interfaces", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-provider", "reth-tasks", @@ -6689,6 +6695,7 @@ dependencies = [ "pin-project", "rand 0.8.5", "reth-net-common", + "reth-network-types", "reth-primitives", "secp256k1", "sha2 0.10.8", @@ -6732,6 +6739,7 @@ dependencies = [ "reth-eth-wire-types", "reth-metrics", "reth-net-common", + "reth-network-types", "reth-primitives", "reth-tracing", "secp256k1", @@ -6888,6 +6896,7 @@ dependencies = [ "reth-consensus", "reth-eth-wire-types", "reth-network-api", + "reth-network-types", "reth-primitives", "secp256k1", "thiserror", @@ -6978,7 +6987,7 @@ name = "reth-net-common" version = "0.2.0-beta.6" dependencies = [ "pin-project", - "reth-primitives", + "reth-network-types", "tokio", ] @@ -7031,6 +7040,7 @@ dependencies = [ "reth-net-common", "reth-network", "reth-network-api", + "reth-network-types", "reth-primitives", "reth-provider", "reth-rpc-types", @@ -7059,6 +7069,7 @@ dependencies = [ "enr", "reth-discv4", "reth-eth-wire", + "reth-network-types", "reth-primitives", "reth-rpc-types", "serde", @@ -7066,6 +7077,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "reth-network-types" +version = "0.2.0-beta.6" +dependencies = [ + "enr", + "reth-primitives", + "reth-rpc-types", + "secp256k1", + "serde_with", +] + [[package]] name = "reth-nippy-jar" version = "0.2.0-beta.6" @@ -7369,7 +7391,6 @@ dependencies = [ "clap", "criterion", 
"derive_more", - "enr", "hash-db", "itertools 0.12.1", "modular-bitfield", @@ -7390,7 +7411,6 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "serde_with", "sha2 0.10.8", "strum 0.26.2", "sucds", @@ -7500,6 +7520,7 @@ dependencies = [ "reth-interfaces", "reth-metrics", "reth-network-api", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", @@ -7531,6 +7552,7 @@ version = "0.2.0-beta.6" dependencies = [ "jsonrpsee", "reth-engine-primitives", + "reth-network-types", "reth-primitives", "reth-rpc-types", "serde", @@ -7679,6 +7701,7 @@ dependencies = [ "reth-evm-ethereum", "reth-exex", "reth-interfaces", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", @@ -7794,6 +7817,7 @@ dependencies = [ "rand 0.8.5", "reth-eth-wire", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", diff --git a/Cargo.toml b/Cargo.toml index b62467405..d56392c1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,7 @@ members = [ "crates/net/nat/", "crates/net/network/", "crates/net/network-api/", + "crates/net/types/", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/ethereum/", @@ -223,7 +224,6 @@ reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node-builder" } reth-node-ethereum = { path = "crates/node-ethereum" } -reth-node-events = { path = "crates/node/events" } reth-node-optimism = { path = "crates/optimism/node" } reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = "crates/node-core" } @@ -249,6 +249,7 @@ reth-net-common = { path = "crates/net/common" } reth-net-nat = { path = "crates/net/nat" } reth-network = { path = "crates/net/network" } reth-network-api = { path = "crates/net/network-api" } +reth-network-types = { path = "crates/net/types" } reth-nippy-jar = { path = "crates/storage/nippy-jar" } reth-payload-builder = 
{ path = "crates/payload/builder" } reth-payload-validator = { path = "crates/payload/validator" } @@ -273,6 +274,7 @@ reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } reth-optimism-consensus = { path = "crates/optimism/consensus" } +reth-node-events = { path = "crates/node/events" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index ec96426a4..435ade53d 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -24,6 +24,7 @@ reth-evm.workspace = true reth-engine-primitives.workspace = true reth-consensus.workspace = true reth-rpc-types.workspace = true +reth-network-types.workspace = true # async futures-util.workspace = true diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index 7ed69c289..67a84d5d9 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -7,9 +7,8 @@ use reth_interfaces::p2p::{ headers::client::{HeadersClient, HeadersFut, HeadersRequest}, priority::Priority, }; -use reth_primitives::{ - BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId, WithPeerId, B256, -}; +use reth_network_types::{PeerId, WithPeerId}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; use std::fmt::Debug; use tracing::{trace, warn}; diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 6c066593b..c2e276a33 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -15,6 +15,7 @@ reth-primitives.workspace = true reth-network-api.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true +reth-network-types.workspace = true # async futures.workspace = true diff --git 
a/crates/interfaces/src/p2p/download.rs b/crates/interfaces/src/p2p/download.rs index b9fb6ab3e..823860507 100644 --- a/crates/interfaces/src/p2p/download.rs +++ b/crates/interfaces/src/p2p/download.rs @@ -1,4 +1,4 @@ -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::fmt::Debug; /// Generic download client for peer penalization diff --git a/crates/interfaces/src/p2p/either.rs b/crates/interfaces/src/p2p/either.rs index 1a6bd170c..af7f15018 100644 --- a/crates/interfaces/src/p2p/either.rs +++ b/crates/interfaces/src/p2p/either.rs @@ -22,7 +22,7 @@ where A: DownloadClient, B: DownloadClient, { - fn report_bad_message(&self, peer_id: reth_primitives::PeerId) { + fn report_bad_message(&self, peer_id: reth_network_types::PeerId) { match self { EitherDownloader::Left(a) => a.report_bad_message(peer_id), EitherDownloader::Right(b) => b.report_bad_message(peer_id), diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index f63f8879a..1a847b649 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -2,8 +2,9 @@ use super::headers::client::HeadersRequest; use crate::{db::DatabaseError, provider::ProviderError}; use reth_consensus::ConsensusError; use reth_network_api::ReputationChangeKind; +use reth_network_types::WithPeerId; use reth_primitives::{ - BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, WithPeerId, B256, + BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, B256, }; use std::ops::RangeInclusive; use thiserror::Error; @@ -12,7 +13,7 @@ use tokio::sync::{mpsc, oneshot}; /// Result alias for result of a request. pub type RequestResult = Result; -/// Result with [PeerId][reth_primitives::PeerId] +/// Result with [PeerId][reth_network_types::PeerId] pub type PeerRequestResult = RequestResult>; /// Helper trait used to validate responses. 
diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index 6cf3f2c81..dd8cfff4d 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -6,8 +6,9 @@ use crate::p2p::{ }; use futures::Stream; use reth_consensus::{Consensus, ConsensusError}; +use reth_network_types::WithPeerId; use reth_primitives::{ - BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, WithPeerId, B256, + BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, B256, }; use std::{ cmp::Reverse, diff --git a/crates/interfaces/src/test_utils/bodies.rs b/crates/interfaces/src/test_utils/bodies.rs index e1d42a2a5..8f0bfcef0 100644 --- a/crates/interfaces/src/test_utils/bodies.rs +++ b/crates/interfaces/src/test_utils/bodies.rs @@ -22,7 +22,7 @@ impl Debug for TestBodiesClient { } impl DownloadClient for TestBodiesClient { - fn report_bad_message(&self, _peer_id: reth_primitives::PeerId) { + fn report_bad_message(&self, _peer_id: reth_network_types::PeerId) { // noop } diff --git a/crates/interfaces/src/test_utils/full_block.rs b/crates/interfaces/src/test_utils/full_block.rs index a97104919..95c1c2b3a 100644 --- a/crates/interfaces/src/test_utils/full_block.rs +++ b/crates/interfaces/src/test_utils/full_block.rs @@ -6,9 +6,10 @@ use crate::p2p::{ priority::Priority, }; use parking_lot::Mutex; +use reth_network_types::{PeerId, WithPeerId}; use reth_primitives::{ - BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, PeerId, SealedBlock, - SealedHeader, WithPeerId, B256, + BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, SealedBlock, + SealedHeader, B256, }; use std::{collections::HashMap, sync::Arc}; diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index 304f394c8..0272c68d3 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ 
-24,7 +24,8 @@ use crate::p2p::{ priority::Priority, }; use reth_consensus::{test_utils::TestConsensus, Consensus}; -use reth_primitives::{Header, HeadersDirection, PeerId, SealedHeader, WithPeerId}; +use reth_network_types::{PeerId, WithPeerId}; +use reth_primitives::{Header, HeadersDirection, SealedHeader}; /// A test downloader which just returns the values that have been pushed to it. #[derive(Debug)] diff --git a/crates/net/common/Cargo.toml b/crates/net/common/Cargo.toml index 8d85fc906..0c3b253a5 100644 --- a/crates/net/common/Cargo.toml +++ b/crates/net/common/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-network-types.workspace = true # async pin-project.workspace = true diff --git a/crates/net/common/src/ban_list.rs b/crates/net/common/src/ban_list.rs index 0527c8620..11d4c6049 100644 --- a/crates/net/common/src/ban_list.rs +++ b/crates/net/common/src/ban_list.rs @@ -1,6 +1,6 @@ //! Support for banning peers. -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{collections::HashMap, net::IpAddr, time::Instant}; /// Determines whether or not the IP is globally routable. 
diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 9a7cb943d..bd7e99ee6 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-primitives.workspace = true reth-net-common.workspace = true reth-net-nat.workspace = true +reth-network-types.workspace = true # ethereum alloy-rlp = { workspace = true, features = ["derive"] } diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 071b81df9..061e4a33b 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -39,7 +39,8 @@ use discv5::{ use enr::Enr; use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse}; -use reth_primitives::{bytes::Bytes, hex, ForkId, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{bytes::Bytes, hex, ForkId, B256}; use secp256k1::SecretKey; use std::{ cell::RefCell, @@ -210,7 +211,8 @@ impl Discv4 { /// # use std::io; /// use rand::thread_rng; /// use reth_discv4::{Discv4, Discv4Config}; - /// use reth_primitives::{pk2id, NodeRecord, PeerId}; + /// use reth_network_types::{pk2id, PeerId}; + /// use reth_primitives::NodeRecord; /// use secp256k1::SECP256K1; /// use std::{net::SocketAddr, str::FromStr}; /// # async fn t() -> io::Result<()> { diff --git a/crates/net/discv4/src/node.rs b/crates/net/discv4/src/node.rs index 2e8dc1773..62e45db0e 100644 --- a/crates/net/discv4/src/node.rs +++ b/crates/net/discv4/src/node.rs @@ -1,5 +1,6 @@ use generic_array::GenericArray; -use reth_primitives::{keccak256, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{keccak256, NodeRecord}; /// The key type for the table. #[derive(Debug, Copy, Clone, Eq, PartialEq)] diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 059ecc5bb..da84dc05a 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,11 +1,12 @@ //! Discovery v4 protocol implementation. 
-use crate::{error::DecodePacketError, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; +use crate::{error::DecodePacketError, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; use enr::Enr; +use reth_network_types::{pk2id, PeerId}; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, - keccak256, pk2id, EnrForkIdEntry, ForkId, NodeRecord, B256, + keccak256, EnrForkIdEntry, ForkId, NodeRecord, B256, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, diff --git a/crates/net/discv4/src/table.rs b/crates/net/discv4/src/table.rs index c7d75778c..00e1fe50c 100644 --- a/crates/net/discv4/src/table.rs +++ b/crates/net/discv4/src/table.rs @@ -1,6 +1,6 @@ //! Additional support for tracking nodes. -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{collections::HashMap, net::IpAddr, time::Instant}; /// Keeps track of nodes from which we have received a `Pong` message. diff --git a/crates/net/discv4/src/test_utils.rs b/crates/net/discv4/src/test_utils.rs index ccd4f9a03..dae3ea388 100644 --- a/crates/net/discv4/src/test_utils.rs +++ b/crates/net/discv4/src/test_utils.rs @@ -6,7 +6,8 @@ use crate::{ IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; use rand::{thread_rng, Rng, RngCore}; -use reth_primitives::{hex, pk2id, ForkHash, ForkId, NodeRecord, B256}; +use reth_network_types::pk2id; +use reth_primitives::{hex, ForkHash, ForkId, NodeRecord, B256}; use secp256k1::{SecretKey, SECP256K1}; use std::{ collections::{HashMap, HashSet}, diff --git a/crates/net/discv5/Cargo.toml b/crates/net/discv5/Cargo.toml index 705ea17a8..a73888ae0 100644 --- a/crates/net/discv5/Cargo.toml +++ b/crates/net/discv5/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-metrics.workspace = true +reth-network-types.workspace = true # ethereum alloy-rlp.workspace = true diff --git a/crates/net/discv5/src/enr.rs 
b/crates/net/discv5/src/enr.rs index 088baf18e..162370bb4 100644 --- a/crates/net/discv5/src/enr.rs +++ b/crates/net/discv5/src/enr.rs @@ -3,7 +3,7 @@ use discv5::enr::{CombinedPublicKey, EnrPublicKey, NodeId}; use enr::Enr; -use reth_primitives::{id2pk, pk2id, PeerId}; +use reth_network_types::{id2pk, pk2id, PeerId}; use secp256k1::{PublicKey, SecretKey}; /// Extracts a [`CombinedPublicKey::Secp256k1`] from a [`discv5::Enr`] and converts it to a diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 14414abf7..5275956bf 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -22,7 +22,8 @@ use enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper}; use futures::future::join_all; use itertools::Itertools; use rand::{Rng, RngCore}; -use reth_primitives::{bytes::Bytes, EnrForkIdEntry, ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{bytes::Bytes, EnrForkIdEntry, ForkId, NodeRecord}; use secp256k1::SecretKey; use tokio::{sync::mpsc, task}; use tracing::{debug, error, trace}; diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 18d7bf815..8076bd4e1 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-net-common.workspace = true +reth-network-types.workspace = true # ethereum secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index b72a45b31..e5ddc0fd1 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -22,7 +22,8 @@ use crate::{ pub use config::DnsDiscoveryConfig; use enr::Enr; use error::ParseDnsEntryError; -use reth_primitives::{pk2id, EnrForkIdEntry, ForkId, NodeRecord}; +use reth_network_types::pk2id; +use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord}; use schnellru::{ByLength, LruMap}; use secp256k1::SecretKey; use std::{ diff 
--git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index f1f14c85c..353956d3b 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -19,6 +19,7 @@ reth-tasks.workspace = true reth-provider.workspace = true reth-config.workspace = true reth-consensus.workspace = true +reth-network-types.workspace = true # async futures.workspace = true diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 032fb3ebc..dfe877a0b 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -6,9 +6,8 @@ use reth_interfaces::p2p::{ error::{DownloadError, DownloadResult}, priority::Priority, }; -use reth_primitives::{ - BlockBody, GotExpected, PeerId, SealedBlock, SealedHeader, WithPeerId, B256, -}; +use reth_network_types::{PeerId, WithPeerId}; +use reth_primitives::{BlockBody, GotExpected, SealedBlock, SealedHeader, B256}; use std::{ collections::VecDeque, mem, diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index ce8f3898b..362ed3c40 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -7,9 +7,10 @@ use reth_interfaces::p2p::{ headers::client::{HeadersClient, HeadersFut, HeadersRequest}, priority::Priority, }; +use reth_network_types::PeerId; use reth_primitives::{ BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BytesMut, Header, HeadersDirection, - PeerId, SealedHeader, B256, + SealedHeader, B256, }; use std::{collections::HashMap, path::Path}; use thiserror::Error; diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 5c12a161a..273f97e58 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -16,9 +16,9 @@ use reth_interfaces::p2p::{ }, priority::Priority, }; 
+use reth_network_types::PeerId; use reth_primitives::{ - BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, PeerId, SealedHeader, - B256, + BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, SealedHeader, B256, }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index 2f3cf2f29..a7387fa88 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -3,7 +3,8 @@ use reth_interfaces::p2p::{ download::DownloadClient, priority::Priority, }; -use reth_primitives::{BlockBody, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{BlockBody, B256}; use std::{ collections::HashMap, fmt::Debug, diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index 461aad885..d4a4de32a 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] reth-primitives.workspace = true reth-net-common.workspace = true +reth-network-types.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } futures.workspace = true diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index bd1eb1d32..52398de4f 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -13,9 +13,10 @@ use ctr::Ctr64BE; use digest::{crypto_common::KeyIvInit, Digest}; use educe::Educe; use rand::{thread_rng, Rng}; +use reth_network_types::{id2pk, pk2id}; use reth_primitives::{ bytes::{BufMut, Bytes, BytesMut}, - id2pk, pk2id, B128, B256, B512 as PeerId, + B128, B256, B512 as PeerId, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 47518aa25..4538fc059 100644 --- a/crates/net/ecies/src/stream.rs +++ 
b/crates/net/ecies/src/stream.rs @@ -175,7 +175,7 @@ where #[cfg(test)] mod tests { use super::*; - use reth_primitives::pk2id; + use reth_network_types::pk2id; use secp256k1::SECP256K1; use tokio::net::{TcpListener, TcpStream}; diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index cddc84cf9..0cfdfef24 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -19,6 +19,7 @@ reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } reth-discv4.workspace = true reth-eth-wire-types.workspace = true +reth-network-types.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index cbd1e3150..8de509034 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -354,7 +354,8 @@ mod tests { use futures::{SinkExt, StreamExt}; use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::stream::ECIESStream; - use reth_primitives::{pk2id, ForkFilter, Head, NamedChain, B256, U256}; + use reth_network_types::pk2id; + use reth_primitives::{ForkFilter, Head, NamedChain, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; use std::time::Duration; use tokio::net::{TcpListener, TcpStream}; diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 6ca8d9d99..f953c4aae 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -2,7 +2,8 @@ use crate::{capability::Capability, EthVersion, ProtocolVersion}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::derive_arbitrary; use reth_discv4::DEFAULT_DISCOVERY_PORT; -use reth_primitives::{constants::RETH_CLIENT_VERSION, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::constants::RETH_CLIENT_VERSION; use crate::protocol::Protocol; #[cfg(feature = "serde")] @@ -38,7 +39,7 @@ impl HelloMessageWithProtocols { /// /// ``` /// use 
reth_eth_wire::HelloMessageWithProtocols; - /// use reth_primitives::pk2id; + /// use reth_network_types::pk2id; /// use secp256k1::{SecretKey, SECP256K1}; /// let secret_key = SecretKey::new(&mut rand::thread_rng()); /// let id = pk2id(&secret_key.public_key(SECP256K1)); @@ -120,7 +121,7 @@ impl HelloMessage { /// /// ``` /// use reth_eth_wire::HelloMessage; - /// use reth_primitives::pk2id; + /// use reth_network_types::pk2id; /// use secp256k1::{SecretKey, SECP256K1}; /// let secret_key = SecretKey::new(&mut rand::thread_rng()); /// let id = pk2id(&secret_key.public_key(SECP256K1)); @@ -209,7 +210,7 @@ impl HelloMessageBuilder { mod tests { use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_primitives::pk2id; + use reth_network_types::pk2id; use secp256k1::{SecretKey, SECP256K1}; use crate::{ diff --git a/crates/net/eth-wire/src/muxdemux.rs b/crates/net/eth-wire/src/muxdemux.rs index 3aa7bc1dd..a9bbe2fdb 100644 --- a/crates/net/eth-wire/src/muxdemux.rs +++ b/crates/net/eth-wire/src/muxdemux.rs @@ -357,9 +357,10 @@ mod tests { UnauthedEthStream, UnauthedP2PStream, }; use futures::{Future, SinkExt, StreamExt}; + use reth_network_types::pk2id; use reth_primitives::{ bytes::{BufMut, Bytes, BytesMut}, - pk2id, ForkFilter, Hardfork, MAINNET, + ForkFilter, Hardfork, MAINNET, }; use secp256k1::{SecretKey, SECP256K1}; use std::{net::SocketAddr, pin::Pin}; diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index 1708e1ffa..0783e4dad 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -4,7 +4,8 @@ use crate::{ EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, Status, UnauthedP2PStream, }; use reth_discv4::DEFAULT_DISCOVERY_PORT; -use reth_primitives::{pk2id, Chain, ForkFilter, Head, B256, U256}; +use reth_network_types::pk2id; +use reth_primitives::{Chain, ForkFilter, Head, B256, U256}; use secp256k1::{SecretKey, 
SECP256K1}; use std::net::SocketAddr; use tokio::net::TcpStream; diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index dcf4089cd..81536aad9 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives.workspace = true reth-eth-wire.workspace = true reth-rpc-types.workspace = true reth-discv4.workspace = true +reth-network-types.workspace = true # eth enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 0c43273cd..6c3040bd9 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -14,7 +14,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_eth_wire::{DisconnectReason, EthVersion, Status}; -use reth_primitives::{NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::NodeRecord; use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; pub use error::NetworkError; diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index b6a0fa846..2ace603e3 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -10,7 +10,8 @@ use crate::{ use enr::{secp256k1::SecretKey, Enr}; use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; -use reth_primitives::{Chain, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{Chain, NodeRecord}; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use std::net::{IpAddr, SocketAddr}; diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index c06ff1518..aa6da6ea2 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -28,6 +28,7 @@ reth-provider.workspace = true reth-rpc-types.workspace = true reth-tokio-util.workspace = true reth-consensus.workspace = true 
+reth-network-types.workspace = true # ethereum enr = { workspace = true, features = ["serde", "rust-secp256k1"] } diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 3e89a1f3a..463bde78d 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -12,9 +12,9 @@ use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::network_key; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; +use reth_network_types::{pk2id, PeerId}; use reth_primitives::{ - mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NamedChain, NodeRecord, - PeerId, MAINNET, + mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NamedChain, NodeRecord, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index d95f2f957..67d659454 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -12,7 +12,8 @@ use reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; -use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord}; use secp256k1::SecretKey; use std::{ collections::VecDeque, diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index baa636b93..57e83391d 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -11,7 +11,8 @@ use reth_eth_wire::{ Receipts, }; use reth_interfaces::p2p::error::RequestResult; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId}; +use reth_network_types::PeerId; +use 
reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection}; use reth_provider::{BlockReader, HeaderProvider, ReceiptProvider}; use std::{ future::Future, diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index eab474506..63e22abe0 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -11,7 +11,8 @@ use reth_interfaces::p2p::{ priority::Priority, }; use reth_network_api::ReputationChangeKind; -use reth_primitives::{Header, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{Header, B256}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 1f85f242d..3a529c97e 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -9,7 +9,8 @@ use reth_interfaces::p2p::{ priority::Priority, }; use reth_network_api::ReputationChangeKind; -use reth_primitives::{BlockBody, Header, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{BlockBody, Header, B256}; use std::{ collections::{HashMap, VecDeque}, sync::{ diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index 738851f0a..2d18da9d4 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -1,7 +1,7 @@ //! This module provides an abstraction over block import in the form of the `BlockImport` trait. use crate::message::NewBlockMessage; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::task::{Context, Poll}; /// Abstraction over block import. 
diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 5783c4ebd..39d29ee71 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -44,7 +44,8 @@ use reth_eth_wire::{ use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_net_common::bandwidth_meter::BandwidthMeter; use reth_network_api::ReputationChangeKind; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, NodeRecord}; use reth_provider::{BlockNumReader, BlockReader}; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use reth_tasks::shutdown::GracefulShutdown; diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index b6861267a..2086fd60e 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -11,8 +11,9 @@ use reth_eth_wire::{ SharedTransactions, Transactions, }; use reth_interfaces::p2p::error::{RequestError, RequestResult}; +use reth_network_types::PeerId; use reth_primitives::{ - BlockBody, Bytes, Header, PeerId, PooledTransactionsElement, ReceiptWithBloom, B256, + BlockBody, Bytes, Header, PooledTransactionsElement, ReceiptWithBloom, B256, }; use std::{ fmt, diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 7104e442e..86669bf19 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -13,7 +13,8 @@ use reth_network_api::{ NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; -use reth_primitives::{Head, NodeRecord, PeerId, TransactionSigned, B256}; +use reth_network_types::PeerId; +use reth_primitives::{Head, NodeRecord, TransactionSigned, B256}; use reth_rpc_types::NetworkStatus; use secp256k1::SecretKey; use std::{ diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index b94c22db7..d6ae9c4da 100644 --- 
a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -14,7 +14,8 @@ use futures::StreamExt; use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; use reth_net_common::ban_list::BanList; use reth_network_api::{PeerKind, ReputationChangeKind}; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, NodeRecord}; use std::{ collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, fmt::Display, @@ -1558,7 +1559,8 @@ mod tests { }; use reth_net_common::ban_list::BanList; use reth_network_api::{Direction, ReputationChangeKind}; - use reth_primitives::{PeerId, B512}; + use reth_network_types::PeerId; + use reth_primitives::B512; use std::{ collections::HashSet, future::{poll_fn, Future}, diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 33c0a66e3..32bfb72ac 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -20,7 +20,7 @@ use reth_eth_wire::{ }; use reth_interfaces::p2p::error::RequestError; use reth_metrics::common::mpsc::MeteredPollSender; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{ collections::VecDeque, future::Future, @@ -769,7 +769,8 @@ mod tests { UnauthedEthStream, UnauthedP2PStream, }; use reth_net_common::bandwidth_meter::{BandwidthMeter, MeteredStream}; - use reth_primitives::{pk2id, ForkFilter, Hardfork, MAINNET}; + use reth_network_types::pk2id; + use reth_primitives::{ForkFilter, Hardfork, MAINNET}; use secp256k1::{SecretKey, SECP256K1}; use tokio::{ net::{TcpListener, TcpStream}, diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index 80298f324..c48fff618 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -12,7 +12,7 @@ use reth_eth_wire::{ DisconnectReason, EthVersion, Status, }; use reth_network_api::PeerInfo; -use 
reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 94d41226e..95f426c54 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -19,7 +19,8 @@ use reth_net_common::{ bandwidth_meter::{BandwidthMeter, MeteredStream}, stream::HasRemoteAddr, }; -use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; use reth_tasks::TaskSpawner; use secp256k1::SecretKey; use std::{ diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index d75a1aaa5..0020b4927 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -18,7 +18,8 @@ use reth_eth_wire::{ capability::Capabilities, BlockHashNumber, DisconnectReason, NewBlockHashes, Status, }; use reth_network_api::PeerKind; -use reth_primitives::{ForkId, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, B256}; use reth_provider::BlockNumReader; use std::{ collections::{HashMap, VecDeque}, @@ -537,7 +538,8 @@ mod tests { BlockBodies, EthVersion, }; use reth_interfaces::p2p::{bodies::client::BodiesClient, error::RequestError}; - use reth_primitives::{BlockBody, Header, PeerId, B256}; + use reth_network_types::PeerId; + use reth_primitives::{BlockBody, Header, B256}; use reth_provider::test_utils::NoopProvider; use std::{ future::poll_fn, diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 136ece0bd..11ac5949a 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -12,7 +12,7 @@ use reth_eth_wire::{ errors::EthStreamError, EthVersion, Status, }; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use 
reth_provider::{BlockNumReader, BlockReader}; use std::{ io, diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index 1419191aa..b72046a7f 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -1,5 +1,5 @@ use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey}; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{net::SocketAddr, time::Duration}; /// The timeout for tests that create a GethInstance diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 9720b7a93..a92934c0c 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -14,7 +14,8 @@ use futures::{FutureExt, StreamExt}; use pin_project::pin_project; use reth_eth_wire::{protocol::Protocol, DisconnectReason, HelloMessageWithProtocols}; use reth_network_api::{NetworkInfo, Peers}; -use reth_primitives::{PeerId, MAINNET}; +use reth_network_types::PeerId; +use reth_primitives::MAINNET; use reth_provider::{ test_utils::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, }; diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index cbec0f1e6..e82a20a31 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -41,7 +41,8 @@ use reth_eth_wire::{ PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; use reth_interfaces::p2p::error::{RequestError, RequestResult}; -use reth_primitives::{PeerId, PooledTransactionsElement, TxHash}; +use reth_network_types::PeerId; +use reth_primitives::{PooledTransactionsElement, TxHash}; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 83176c566..f7d03520f 100644 --- 
a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -26,9 +26,9 @@ use reth_interfaces::{ }; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{Peers, ReputationChangeKind}; +use reth_network_types::PeerId; use reth_primitives::{ - FromRecoveredPooledTransaction, PeerId, PooledTransactionsElement, TransactionSigned, TxHash, - B256, + FromRecoveredPooledTransaction, PooledTransactionsElement, TransactionSigned, TxHash, B256, }; use reth_transaction_pool::{ error::{PoolError, PoolResult}, diff --git a/crates/net/types/Cargo.toml b/crates/net/types/Cargo.toml new file mode 100644 index 000000000..841a76dfe --- /dev/null +++ b/crates/net/types/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "reth-network-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Network types and utils" + +[lints] +workspace = true + +[dependencies] +# reth +reth-rpc-types.workspace = true +reth-primitives.workspace = true + +# eth +enr.workspace = true + +# crypto +secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } + +# misc +serde_with.workspace = true + diff --git a/crates/net/types/src/lib.rs b/crates/net/types/src/lib.rs new file mode 100644 index 000000000..ccd9757c9 --- /dev/null +++ b/crates/net/types/src/lib.rs @@ -0,0 +1,239 @@ +//! Network Types and Utilities. +//! +//! This crate manages and converts Ethereum network entities such as node records, peer IDs, and +//! 
Ethereum Node Records (ENRs) + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use secp256k1::{constants::UNCOMPRESSED_PUBLIC_KEY_SIZE, PublicKey, SecretKey}; +use std::{net::IpAddr, str::FromStr}; + +// Re-export PeerId for ease of use. +pub use enr::Enr; +pub use reth_rpc_types::{NodeRecord, PeerId}; + +/// This tag should be set to indicate to libsecp256k1 that the following bytes denote an +/// uncompressed pubkey. +/// +/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` = `0x04` +/// +/// See: +const SECP256K1_TAG_PUBKEY_UNCOMPRESSED: u8 = 4; + +/// Converts a [secp256k1::PublicKey] to a [PeerId] by stripping the +/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag and storing the rest of the slice in the [PeerId]. +#[inline] +pub fn pk2id(pk: &PublicKey) -> PeerId { + PeerId::from_slice(&pk.serialize_uncompressed()[1..]) +} + +/// Converts a [PeerId] to a [secp256k1::PublicKey] by prepending the [PeerId] bytes with the +/// SECP256K1_TAG_PUBKEY_UNCOMPRESSED tag. +#[inline] +pub fn id2pk(id: PeerId) -> Result { + // NOTE: B512 is used as a PeerId because 512 bits is enough to represent an uncompressed + // public key. + let mut s = [0u8; UNCOMPRESSED_PUBLIC_KEY_SIZE]; + s[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; + s[1..].copy_from_slice(id.as_slice()); + PublicKey::from_slice(&s) +} + +/// A peer that can come in ENR or [NodeRecord] form. 
+#[derive( + Debug, Clone, Eq, PartialEq, Hash, serde_with::SerializeDisplay, serde_with::DeserializeFromStr, +)] +pub enum AnyNode { + /// An "enode:" peer with full ip + NodeRecord(NodeRecord), + /// An "enr:" + Enr(Enr), + /// An incomplete "enode" with only a peer id + PeerId(PeerId), +} + +impl AnyNode { + /// Returns the peer id of the node. + pub fn peer_id(&self) -> PeerId { + match self { + AnyNode::NodeRecord(record) => record.id, + AnyNode::Enr(enr) => pk2id(&enr.public_key()), + AnyNode::PeerId(peer_id) => *peer_id, + } + } + + /// Returns the full node record if available. + pub fn node_record(&self) -> Option { + match self { + AnyNode::NodeRecord(record) => Some(*record), + AnyNode::Enr(enr) => { + let node_record = NodeRecord { + address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, + tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, + udp_port: enr.udp4().or_else(|| enr.udp6())?, + id: pk2id(&enr.public_key()), + } + .into_ipv4_mapped(); + Some(node_record) + } + _ => None, + } + } +} + +impl From for AnyNode { + fn from(value: NodeRecord) -> Self { + Self::NodeRecord(value) + } +} + +impl From> for AnyNode { + fn from(value: Enr) -> Self { + Self::Enr(value) + } +} + +impl FromStr for AnyNode { + type Err = String; + + fn from_str(s: &str) -> Result { + if let Some(rem) = s.strip_prefix("enode://") { + if let Ok(record) = NodeRecord::from_str(s) { + return Ok(AnyNode::NodeRecord(record)) + } + // incomplete enode + if let Ok(peer_id) = PeerId::from_str(rem) { + return Ok(AnyNode::PeerId(peer_id)) + } + return Err(format!("invalid public key: {rem}")) + } + if s.starts_with("enr:") { + return Enr::from_str(s).map(AnyNode::Enr) + } + Err("missing 'enr:' prefix for base64-encoded record".to_string()) + } +} + +impl std::fmt::Display for AnyNode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AnyNode::NodeRecord(record) => write!(f, "{record}"), + AnyNode::Enr(enr) => write!(f, "{enr}"), + 
AnyNode::PeerId(peer_id) => { + write!(f, "enode://{}", reth_primitives::hex::encode(peer_id.as_slice())) + } + } + } +} + +/// Generic wrapper with peer id +#[derive(Debug)] +pub struct WithPeerId(PeerId, pub T); + +impl From<(PeerId, T)> for WithPeerId { + fn from(value: (PeerId, T)) -> Self { + Self(value.0, value.1) + } +} + +impl WithPeerId { + /// Wraps the value with the peerid. + pub fn new(peer: PeerId, value: T) -> Self { + Self(peer, value) + } + + /// Get the peer id + pub fn peer_id(&self) -> PeerId { + self.0 + } + + /// Get the underlying data + pub fn data(&self) -> &T { + &self.1 + } + + /// Returns ownership of the underlying data. + pub fn into_data(self) -> T { + self.1 + } + + /// Transform the data + pub fn transform>(self) -> WithPeerId { + WithPeerId(self.0, self.1.into()) + } + + /// Split the wrapper into [PeerId] and data tuple + pub fn split(self) -> (PeerId, T) { + (self.0, self.1) + } + + /// Maps the inner value to a new value using the given function. + pub fn map U>(self, op: F) -> WithPeerId { + WithPeerId(self.0, op(self.1)) + } +} + +impl WithPeerId> { + /// returns `None` if the inner value is `None`, otherwise returns `Some(WithPeerId)`. 
+ pub fn transpose(self) -> Option> { + self.1.map(|v| WithPeerId(self.0, v)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use secp256k1::SECP256K1; + + #[test] + fn test_node_record_parse() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let node: AnyNode = url.parse().unwrap(); + assert_eq!(node, AnyNode::NodeRecord(NodeRecord { + address: IpAddr::V4([10,3,58,6].into()), + tcp_port: 30303, + udp_port: 30301, + id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), + })); + assert_eq!(node.to_string(), url) + } + + #[test] + fn test_peer_id_parse() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0"; + let node: AnyNode = url.parse().unwrap(); + assert_eq!(node, AnyNode::PeerId("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap())); + assert_eq!(node.to_string(), url); + + let url = "enode://"; + let err = url.parse::().unwrap_err(); + assert_eq!(err, "invalid public key: "); + } + + // + #[test] + fn test_enr_parse() { + let url = "enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8"; + let node: AnyNode = url.parse().unwrap(); + assert_eq!( + node.peer_id(), + "0xca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f" + .parse::() + .unwrap() + ); + assert_eq!(node.to_string(), url); + } + + #[test] + fn pk2id2pk() { + let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng()); + let pubkey = 
PublicKey::from_secret_key(SECP256K1, &prikey); + assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap()); + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index ab6b44303..4fa504665 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -27,7 +27,6 @@ alloy-trie = { workspace = true, features = ["serde"] } nybbles = { workspace = true, features = ["serde", "rlp"] } alloy-genesis.workspace = true alloy-eips.workspace = true -enr.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } @@ -43,7 +42,6 @@ itertools.workspace = true modular-bitfield.workspace = true once_cell.workspace = true rayon.workspace = true -serde_with.workspace = true serde.workspace = true serde_json.workspace = true sha2 = { version = "0.10.7", optional = true } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 9e77b4c15..1c8808628 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -33,7 +33,6 @@ mod header; mod integer_list; mod log; mod net; -mod peer; pub mod proofs; mod prune; mod receipt; @@ -77,7 +76,6 @@ pub use net::{ NodeRecordParseError, GOERLI_BOOTNODES, HOLESKY_BOOTNODES, MAINNET_BOOTNODES, SEPOLIA_BOOTNODES, }; -pub use peer::{id2pk, pk2id, AnyNode, PeerId, WithPeerId}; pub use prune::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneModes, PruneProgress, PrunePurpose, PruneSegment, PruneSegmentError, ReceiptsLogPruneConfig, diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index c2ada1e88..fe22eae0f 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-primitives.workspace = true reth-rpc-types.workspace = true reth-engine-primitives.workspace = true +reth-network-types.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-api/src/admin.rs 
b/crates/rpc/rpc-api/src/admin.rs index 7497d1205..4c31221cd 100644 --- a/crates/rpc/rpc-api/src/admin.rs +++ b/crates/rpc/rpc-api/src/admin.rs @@ -1,5 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{AnyNode, NodeRecord}; +use reth_network_types::AnyNode; +use reth_primitives::NodeRecord; use reth_rpc_types::{admin::NodeInfo, PeerInfo}; /// Admin namespace rpc interface that gives access to several non-standard RPC methods. diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index d5bd324ae..81788f0a3 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -27,6 +27,7 @@ reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true revm-inspectors.workspace = true reth-evm.workspace = true +reth-network-types.workspace = true # eth alloy-rlp.workspace = true diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 3f5578433..6f3125e06 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -3,7 +3,8 @@ use alloy_primitives::B256; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_network_api::{NetworkInfo, PeerKind, Peers}; -use reth_primitives::{AnyNode, ChainSpec, NodeRecord}; +use reth_network_types::AnyNode; +use reth_primitives::{ChainSpec, NodeRecord}; use reth_rpc_api::AdminApiServer; use reth_rpc_types::{ admin::{EthProtocolInfo, NodeInfo, Ports, ProtocolInfo}, diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 2692c9410..f3bd16a5e 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -51,6 +51,7 @@ reth-revm.workspace = true reth-static-file.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-network-types.workspace = true alloy-rlp.workspace = true itertools.workspace = true diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 4b6df9391..3fea3e04d 100644 --- 
a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -17,7 +17,8 @@ //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! # use reth_interfaces::test_utils::{TestBodiesClient, TestHeadersClient}; //! # use reth_revm::EvmProcessorFactory; -//! # use reth_primitives::{PeerId, MAINNET, B256, PruneModes}; +//! # use reth_primitives::{MAINNET, B256, PruneModes}; +//! # use reth_network_types::PeerId; //! # use reth_stages::Pipeline; //! # use reth_stages::sets::DefaultStages; //! # use tokio::sync::watch; diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 5b6b85486..4365245cf 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -20,6 +20,7 @@ reth-tasks.workspace = true revm.workspace = true alloy-rlp.workspace = true reth-revm = { workspace = true, optional = true } +reth-network-types.workspace = true # async/futures futures-util.workspace = true diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index c5603ec7b..79b9af698 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -9,13 +9,13 @@ use crate::{ }; use futures_util::{ready, Stream}; use reth_eth_wire::HandleMempoolData; +use reth_network_types::PeerId; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, - IntoRecoveredTransaction, PeerId, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, - TryFromRecoveredTransaction, TxEip4844, TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, - EIP4844_TX_TYPE_ID, U256, + IntoRecoveredTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, + SealedBlock, Transaction, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip4844, + 
TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/examples/manual-p2p/Cargo.toml b/examples/manual-p2p/Cargo.toml index a9c7f2513..139cb0e18 100644 --- a/examples/manual-p2p/Cargo.toml +++ b/examples/manual-p2p/Cargo.toml @@ -6,14 +6,17 @@ edition.workspace = true license.workspace = true [dependencies] -once_cell.workspace = true -eyre.workspace = true - reth-primitives.workspace = true reth-network.workspace = true reth-discv4.workspace = true reth-eth-wire.workspace = true reth-ecies.workspace = true -futures.workspace = true +reth-network-types.workspace = true + secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } + +futures.workspace = true tokio.workspace = true + +eyre.workspace = true +once_cell.workspace = true diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index 737daf728..e97cb3662 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -16,8 +16,9 @@ use reth_eth_wire::{ EthMessage, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream, UnauthedP2PStream, }; use reth_network::config::rng_secret_key; +use reth_network_types::pk2id; use reth_primitives::{ - mainnet_nodes, pk2id, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, + mainnet_nodes, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, }; use secp256k1::{SecretKey, SECP256K1}; use tokio::net::TcpStream; From 43f58f16dd23fc9e75de06e11222165ffe71b1f6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 07:55:06 +0200 Subject: [PATCH 086/250] chore: get rid of compat call (#7930) --- crates/payload/validator/src/lib.rs | 22 +++-- crates/primitives/src/block.rs | 126 +++++++++++++++------------- 2 files changed, 80 insertions(+), 68 deletions(-) diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 
4f9f51507..c3b25aef9 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -10,7 +10,7 @@ use reth_primitives::{ChainSpec, SealedBlock}; use reth_rpc_types::{engine::MaybeCancunPayloadFields, ExecutionPayload, PayloadError}; -use reth_rpc_types_compat::engine::payload::{try_into_block, validate_block_hash}; +use reth_rpc_types_compat::engine::payload::try_into_block; use std::sync::Arc; /// Execution payload validator. @@ -100,21 +100,27 @@ impl ExecutionPayloadValidator { payload: ExecutionPayload, cancun_fields: MaybeCancunPayloadFields, ) -> Result { - let block_hash = payload.block_hash(); + let expected_hash = payload.block_hash(); // First parse the block - let block = try_into_block(payload, cancun_fields.parent_beacon_block_root())?; + let sealed_block = + try_into_block(payload, cancun_fields.parent_beacon_block_root())?.seal_slow(); - let cancun_active = self.is_cancun_active_at_timestamp(block.timestamp); + // Ensure the hash included in the payload matches the block hash + if expected_hash != sealed_block.hash() { + return Err(PayloadError::BlockHash { + execution: sealed_block.hash(), + consensus: expected_hash, + }) + } + + let cancun_active = self.is_cancun_active_at_timestamp(sealed_block.timestamp); - if !cancun_active && block.has_blob_transactions() { + if !cancun_active && sealed_block.has_blob_transactions() { // cancun not active but blob transactions present return Err(PayloadError::PreCancunBlockWithBlobTransactions) } - // Ensure the hash included in the payload matches the block hash - let sealed_block = validate_block_hash(block_hash, block)?; - // EIP-4844 checks self.ensure_matching_blob_versioned_hashes(&sealed_block, &cancun_fields)?; diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 06c08db1f..864e7954f 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -296,66 +296,6 @@ pub struct SealedBlock { pub withdrawals: Option, } -/// 
Generates a header which is valid __with respect to past and future forks__. This means, for -/// example, that if the withdrawals root is present, the base fee per gas is also present. -/// -/// If blob gas used were present, then the excess blob gas and parent beacon block root are also -/// present. In this example, the withdrawals root would also be present. -/// -/// This __does not, and should not guarantee__ that the header is valid with respect to __anything -/// else__. -#[cfg(any(test, feature = "arbitrary"))] -pub fn generate_valid_header( - mut header: Header, - eip_4844_active: bool, - blob_gas_used: u64, - excess_blob_gas: u64, - parent_beacon_block_root: B256, -) -> Header { - // EIP-1559 logic - if header.base_fee_per_gas.is_none() { - // If EIP-1559 is not active, clear related fields - header.withdrawals_root = None; - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if header.withdrawals_root.is_none() { - // If EIP-4895 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if eip_4844_active { - // Set fields based on EIP-4844 being active - header.blob_gas_used = Some(blob_gas_used); - header.excess_blob_gas = Some(excess_blob_gas); - header.parent_beacon_block_root = Some(parent_beacon_block_root); - } else { - // If EIP-4844 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } - - header -} - -#[cfg(any(test, feature = "arbitrary"))] -prop_compose! { - /// Generates a proptest strategy for constructing an instance of a header which is valid __with - /// respect to past and future forks__. - /// - /// See docs for [generate_valid_header] for more information. - pub fn valid_header_strategy()( - header in any::
(), - eip_4844_active in any::(), - blob_gas_used in any::(), - excess_blob_gas in any::(), - parent_beacon_block_root in any::() - ) -> Header { - generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root) - } -} - impl SealedBlock { /// Create a new sealed block instance using the sealed header and block body. #[inline] @@ -458,6 +398,12 @@ impl SealedBlock { self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() } + /// Returns whether or not the block contains any blob transactions. + #[inline] + pub fn has_blob_transactions(&self) -> bool { + self.body.iter().any(|tx| tx.is_eip4844()) + } + /// Ensures that the transaction root in the block header is valid. /// /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure @@ -653,6 +599,66 @@ impl From for BlockBody { } } +/// Generates a header which is valid __with respect to past and future forks__. This means, for +/// example, that if the withdrawals root is present, the base fee per gas is also present. +/// +/// If blob gas used were present, then the excess blob gas and parent beacon block root are also +/// present. In this example, the withdrawals root would also be present. +/// +/// This __does not, and should not guarantee__ that the header is valid with respect to __anything +/// else__. 
+#[cfg(any(test, feature = "arbitrary"))] +pub fn generate_valid_header( + mut header: Header, + eip_4844_active: bool, + blob_gas_used: u64, + excess_blob_gas: u64, + parent_beacon_block_root: B256, +) -> Header { + // EIP-1559 logic + if header.base_fee_per_gas.is_none() { + // If EIP-1559 is not active, clear related fields + header.withdrawals_root = None; + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } else if header.withdrawals_root.is_none() { + // If EIP-4895 is not active, clear related fields + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } else if eip_4844_active { + // Set fields based on EIP-4844 being active + header.blob_gas_used = Some(blob_gas_used); + header.excess_blob_gas = Some(excess_blob_gas); + header.parent_beacon_block_root = Some(parent_beacon_block_root); + } else { + // If EIP-4844 is not active, clear related fields + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } + + header +} + +#[cfg(any(test, feature = "arbitrary"))] +prop_compose! { + /// Generates a proptest strategy for constructing an instance of a header which is valid __with + /// respect to past and future forks__. + /// + /// See docs for [generate_valid_header] for more information. + pub fn valid_header_strategy()( + header in any::
(), + eip_4844_active in any::(), + blob_gas_used in any::(), + excess_blob_gas in any::(), + parent_beacon_block_root in any::() + ) -> Header { + generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root) + } +} + #[cfg(test)] mod tests { use super::{BlockNumberOrTag::*, *}; From 5f15af5401b8df52ebcc39d451e220c4022ee10b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 17:08:37 +0200 Subject: [PATCH 087/250] chore: bump ratatui; rm unused (#7934) --- Cargo.lock | 76 +++++++++++++++++----------------- Cargo.toml | 1 + bin/reth/Cargo.toml | 9 +--- crates/node-builder/Cargo.toml | 2 +- 4 files changed, 41 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a40fb4513..478d38713 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -127,7 +127,7 @@ dependencies = [ "num_enum", "proptest", "serde", - "strum 0.26.2", + "strum", ] [[package]] @@ -1470,6 +1470,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "castaway" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +dependencies = [ + "rustversion", +] + [[package]] name = "cc" version = "1.0.95" @@ -1673,11 +1682,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" dependencies = [ "crossterm", - "strum 0.26.2", - "strum_macros 0.26.2", + "strum", + "strum_macros", "unicode-width", ] +[[package]] +name = "compact_str" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" +dependencies = [ + "castaway", + "cfg-if", + "itoa", + "ryu", + "static_assertions", +] + [[package]] name = "concat-kdf" 
version = "0.1.0" @@ -6017,19 +6039,20 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.25.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5659e52e4ba6e07b2dad9f1158f578ef84a73762625ddb51536019f34d180eb" +checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80" dependencies = [ "bitflags 2.5.0", "cassowary", + "compact_str", "crossterm", "indoc", "itertools 0.12.1", "lru", "paste", "stability", - "strum 0.25.0", + "strum", "unicode-segmentation", "unicode-width", ] @@ -6262,7 +6285,6 @@ dependencies = [ "rand 0.8.5", "ratatui", "rayon", - "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-blockchain-tree", @@ -6294,7 +6316,6 @@ dependencies = [ "reth-rpc", "reth-rpc-api", "reth-rpc-builder", - "reth-rpc-engine-api", "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", @@ -6307,7 +6328,6 @@ dependencies = [ "serde_json", "similar-asserts", "tempfile", - "thiserror", "tikv-jemallocator", "tokio", "toml", @@ -6537,7 +6557,7 @@ dependencies = [ "rustc-hash", "serde", "serde_json", - "strum 0.26.2", + "strum", "tempfile", "test-fuzz", "thiserror", @@ -7412,7 +7432,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "strum 0.26.2", + "strum", "sucds", "tempfile", "test-fuzz", @@ -7446,7 +7466,7 @@ dependencies = [ "reth-rpc-types", "reth-trie", "revm", - "strum 0.26.2", + "strum", "tempfile", "tokio", "tokio-stream", @@ -7602,7 +7622,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "strum 0.26.2", + "strum", "thiserror", "tokio", "tower", @@ -8868,12 +8888,12 @@ checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" [[package]] name = "stability" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce" +checksum = 
"2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.60", ] [[package]] @@ -8912,35 +8932,13 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "strum" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" -dependencies = [ - "strum_macros 0.25.3", -] - [[package]] name = "strum" version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 0.26.2", -] - -[[package]] -name = "strum_macros" -version = "0.25.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.60", + "strum_macros", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d56392c1d..478c2b453 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -310,6 +310,7 @@ bytes = "1.5" bitflags = "2.4" clap = "4" derive_more = "0.99.17" +fdlimit = "0.3.0" eyre = "0.6" tracing = "0.1.0" tracing-appender = "0.2" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 5e47506db..995b29610 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -24,11 +24,9 @@ reth-stages.workspace = true reth-interfaces = { workspace = true, features = ["clap"] } reth-transaction-pool.workspace = true reth-beacon-consensus.workspace = true -reth-auto-seal-consensus.workspace = true reth-cli-runner.workspace = true reth-consensus-common.workspace = true reth-blockchain-tree.workspace = true -reth-rpc-engine-api.workspace = true reth-rpc-builder.workspace = true reth-rpc.workspace = true 
reth-rpc-types.workspace = true @@ -65,7 +63,7 @@ alloy-rlp.workspace = true tracing.workspace = true # io -fdlimit = "0.3.0" +fdlimit.workspace = true serde.workspace = true serde_json.workspace = true confy.workspace = true @@ -81,7 +79,7 @@ rand.workspace = true # tui comfy-table = "7.0" crossterm = "0.27.0" -ratatui = "0.25.0" +ratatui = { version = "0.26", default-features = false, features = ["crossterm"] } human_bytes = "0.4.1" # async @@ -104,7 +102,6 @@ itertools.workspace = true rayon.workspace = true boyer-moore-magiclen = "0.2.16" ahash = "0.8" -thiserror.workspace = true # p2p discv5.workspace = true @@ -136,10 +133,8 @@ optimism = [ "reth-revm/optimism", "reth-interfaces/optimism", "reth-rpc/optimism", - "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-beacon-consensus/optimism", - "reth-auto-seal-consensus/optimism", "reth-blockchain-tree/optimism", "dep:reth-node-optimism", "reth-node-core/optimism", diff --git a/crates/node-builder/Cargo.toml b/crates/node-builder/Cargo.toml index aae73f5a6..270b0dfe5 100644 --- a/crates/node-builder/Cargo.toml +++ b/crates/node-builder/Cargo.toml @@ -51,6 +51,6 @@ tokio = { workspace = true, features = [ ## misc aquamarine.workspace = true eyre.workspace = true -fdlimit = "0.3.0" +fdlimit.workspace = true confy.workspace = true rayon.workspace = true From 2deb259ead0793740d6b1dc0535ef7afa3c3f80f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 17:09:28 +0200 Subject: [PATCH 088/250] feat: replace duplicate Withdrawal type with alloy (#7931) --- .../ethereum/engine-primitives/src/payload.rs | 14 +-- crates/payload/optimism/src/payload.rs | 11 +- crates/primitives/src/withdrawal.rs | 112 ++++++++++-------- crates/revm/src/state_change.rs | 2 +- crates/rpc/rpc-engine-api/tests/it/payload.rs | 10 +- crates/rpc/rpc-types-compat/src/engine/mod.rs | 5 +- .../rpc-types-compat/src/engine/payload.rs | 70 ++--------- 7 files changed, 82 insertions(+), 142 deletions(-) diff --git 
a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index a6c47ebde..a354e0588 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -11,8 +11,7 @@ use reth_rpc_types::engine::{ PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, - convert_standalone_withdraw_to_withdrawal, try_block_to_payload_v1, + block_to_payload_v3, convert_block_to_payload_field_v2, try_block_to_payload_v1, }; use revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use std::convert::Infallible; @@ -159,22 +158,13 @@ impl EthPayloadBuilderAttributes { pub fn new(parent: B256, attributes: PayloadAttributes) -> Self { let id = payload_id(&parent, &attributes); - let withdraw = attributes.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals - .into_iter() - .map(convert_standalone_withdraw_to_withdrawal) // Removed the parentheses here - .collect(), - ) - }); - Self { id, parent, timestamp: attributes.timestamp, suggested_fee_recipient: attributes.suggested_fee_recipient, prev_randao: attributes.prev_randao, - withdrawals: withdraw.unwrap_or_default(), + withdrawals: attributes.withdrawals.unwrap_or_default().into(), parent_beacon_block_root: attributes.parent_beacon_block_root, } } diff --git a/crates/payload/optimism/src/payload.rs b/crates/payload/optimism/src/payload.rs index d753370fd..b90d05d5f 100644 --- a/crates/payload/optimism/src/payload.rs +++ b/crates/payload/optimism/src/payload.rs @@ -16,8 +16,7 @@ use reth_rpc_types::engine::{ OptimismPayloadAttributes, PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, - convert_standalone_withdraw_to_withdrawal, try_block_to_payload_v1, + block_to_payload_v3, convert_block_to_payload_field_v2, try_block_to_payload_v1, }; use 
revm::primitives::HandlerCfg; use std::sync::Arc; @@ -54,19 +53,13 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { (payload_id_optimism(&parent, &attributes, &transactions), transactions) }; - let withdraw = attributes.payload_attributes.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals.into_iter().map(convert_standalone_withdraw_to_withdrawal).collect(), - ) - }); - let payload_attributes = EthPayloadBuilderAttributes { id, parent, timestamp: attributes.payload_attributes.timestamp, suggested_fee_recipient: attributes.payload_attributes.suggested_fee_recipient, prev_randao: attributes.payload_attributes.prev_randao, - withdrawals: withdraw.unwrap_or_default(), + withdrawals: attributes.payload_attributes.withdrawals.unwrap_or_default().into(), parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, }; diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index a348b6a05..e47b2816a 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -1,51 +1,12 @@ -use crate::{constants::GWEI_TO_WEI, serde_helper::u64_via_ruint, Address}; -use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs::{main_codec, Compact}; -use std::{ - mem, - ops::{Deref, DerefMut}, -}; +//! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. -/// Withdrawal represents a validator withdrawal from the consensus layer. -#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] -pub struct Withdrawal { - /// Monotonically increasing identifier issued by consensus layer. - #[serde(with = "u64_via_ruint")] - pub index: u64, - /// Index of validator associated with withdrawal. - #[serde(with = "u64_via_ruint", rename = "validatorIndex")] - pub validator_index: u64, - /// Target address for withdrawn ether. 
- pub address: Address, - /// Value of the withdrawal in gwei. - #[serde(with = "u64_via_ruint")] - pub amount: u64, -} +use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; +use reth_codecs::{main_codec, Compact}; +use std::ops::{Deref, DerefMut}; -impl Withdrawal { - /// Return the withdrawal amount in wei. - pub fn amount_wei(&self) -> u128 { - self.amount as u128 * GWEI_TO_WEI as u128 - } - - /// Calculate a heuristic for the in-memory size of the [Withdrawal]. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() - } -} - -impl From for Withdrawal { - fn from(withdrawal: reth_rpc_types::Withdrawal) -> Self { - Self { - index: withdrawal.index, - validator_index: withdrawal.index, - address: withdrawal.address, - amount: withdrawal.amount, - } - } -} +/// Re-export from `alloy_eips`. +#[doc(inline)] +pub use alloy_eips::eip4895::Withdrawal; /// Represents a collection of Withdrawals. #[main_codec] @@ -61,13 +22,13 @@ impl Withdrawals { /// Calculate the total size, including capacity, of the Withdrawals. #[inline] pub fn total_size(&self) -> usize { - self.size() + self.capacity() * std::mem::size_of::() + self.capacity() * std::mem::size_of::() } /// Calculate a heuristic for the in-memory size of the [Withdrawals]. #[inline] pub fn size(&self) -> usize { - self.iter().map(Withdrawal::size).sum() + self.len() * std::mem::size_of::() } /// Get an iterator over the Withdrawals. 
@@ -115,15 +76,45 @@ impl DerefMut for Withdrawals { } } -impl From> for Withdrawals { - fn from(withdrawals: Vec) -> Self { - Self(withdrawals.into_iter().map(Into::into).collect()) +impl From> for Withdrawals { + fn from(withdrawals: Vec) -> Self { + Self(withdrawals) } } #[cfg(test)] mod tests { use super::*; + use crate::{serde_helper::u64_via_ruint, Address}; + use alloy_rlp::{RlpDecodable, RlpEncodable}; + use proptest::proptest; + + /// This type is kept for compatibility tests after the codec support was added to alloy-eips + /// Withdrawal type natively + #[main_codec] + #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] + struct RethWithdrawal { + /// Monotonically increasing identifier issued by consensus layer. + #[serde(with = "u64_via_ruint")] + index: u64, + /// Index of validator associated with withdrawal. + #[serde(with = "u64_via_ruint", rename = "validatorIndex")] + validator_index: u64, + /// Target address for withdrawn ether. + address: Address, + /// Value of the withdrawal in gwei. 
+ #[serde(with = "u64_via_ruint")] + amount: u64, + } + + impl PartialEq for RethWithdrawal { + fn eq(&self, other: &Withdrawal) -> bool { + self.index == other.index && + self.validator_index == other.validator_index && + self.address == other.address && + self.amount == other.amount + } + } // #[test] @@ -134,4 +125,23 @@ mod tests { let s = serde_json::to_string(&withdrawals).unwrap(); assert_eq!(input, s); } + + proptest!( + #[test] + fn test_roundtrip_withdrawal_compat(withdrawal: RethWithdrawal) { + // Convert to buffer and then create alloy_access_list from buffer and + // compare + let mut compacted_reth_withdrawal = Vec::::new(); + let len = withdrawal.clone().to_compact(&mut compacted_reth_withdrawal); + + // decode the compacted buffer to AccessList + let alloy_withdrawal = Withdrawal::from_compact(&compacted_reth_withdrawal, len).0; + assert_eq!(withdrawal, alloy_withdrawal); + + let mut compacted_alloy_withdrawal = Vec::::new(); + let alloy_len = alloy_withdrawal.to_compact(&mut compacted_alloy_withdrawal); + assert_eq!(len, alloy_len); + assert_eq!(compacted_reth_withdrawal, compacted_alloy_withdrawal); + } + ); } diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index 5d38c656e..d2b0a6b5b 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -152,7 +152,7 @@ pub fn insert_post_block_withdrawals_balance_increments( for withdrawal in withdrawals.iter() { if withdrawal.amount > 0 { *balance_increments.entry(withdrawal.address).or_default() += - withdrawal.amount_wei(); + withdrawal.amount_wei().to::(); } } } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 8853b5c88..0979af400 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -13,8 +13,8 @@ use reth_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, }; use 
reth_rpc_types_compat::engine::payload::{ - convert_standalone_withdraw_to_withdrawal, convert_to_payload_body_v1, try_block_to_payload, - try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block, + convert_to_payload_body_v1, try_block_to_payload, try_block_to_payload_v1, + try_into_sealed_block, try_payload_v1_to_block, }; fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { @@ -46,11 +46,7 @@ fn payload_body_roundtrip() { .map(|x| TransactionSigned::decode(&mut &x[..])) .collect::, _>>(), ); - let withdraw = payload_body.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals.into_iter().map(convert_standalone_withdraw_to_withdrawal).collect(), - ) - }); + let withdraw = payload_body.withdrawals.map(Withdrawals::new); assert_eq!(block.withdrawals, withdraw); } } diff --git a/crates/rpc/rpc-types-compat/src/engine/mod.rs b/crates/rpc/rpc-types-compat/src/engine/mod.rs index e03ba6f4c..e14b83500 100644 --- a/crates/rpc/rpc-types-compat/src/engine/mod.rs +++ b/crates/rpc/rpc-types-compat/src/engine/mod.rs @@ -1,6 +1,3 @@ //! 
Standalone functions for engine specific rpc type conversions pub mod payload; -pub use payload::{ - convert_standalone_withdraw_to_withdrawal, convert_withdrawal_to_standalone_withdraw, - try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block, -}; +pub use payload::{try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block}; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 469475301..fdacab4e6 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -4,7 +4,7 @@ use reth_primitives::{ constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE, MIN_PROTOCOL_BASE_FEE_U256}, proofs::{self}, - Block, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawal, Withdrawals, B256, U256, + Block, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, @@ -65,11 +65,8 @@ pub fn try_payload_v2_to_block(payload: ExecutionPayloadV2) -> Result ExecutionPayloadV1 { /// Converts [SealedBlock] to [ExecutionPayloadV2] pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { let transactions = value.raw_transactions(); - let standalone_withdrawals: Vec = value - .withdrawals - .clone() - .unwrap_or_default() - .into_iter() - .map(convert_withdrawal_to_standalone_withdraw) - .collect(); ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { @@ -149,7 +139,7 @@ pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { block_hash: value.hash(), transactions, }, - withdrawals: standalone_withdrawals, + withdrawals: value.withdrawals.unwrap_or_default().into_inner(), } } @@ -157,15 +147,9 @@ pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { let 
transactions = value.raw_transactions(); - let withdrawals: Vec = value - .withdrawals - .clone() - .unwrap_or_default() - .into_iter() - .map(convert_withdrawal_to_standalone_withdraw) - .collect(); - ExecutionPayloadV3 { + blob_gas_used: value.blob_gas_used.unwrap_or_default(), + excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), payload_inner: ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { parent_hash: value.parent_hash, @@ -183,11 +167,8 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { block_hash: value.hash(), transactions, }, - withdrawals, + withdrawals: value.withdrawals.unwrap_or_default().into_inner(), }, - - blob_gas_used: value.blob_gas_used.unwrap_or_default(), - excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), } } @@ -222,11 +203,8 @@ pub fn convert_payload_input_v2_to_payload(value: ExecutionPayloadInputV2) -> Ex /// Converts [SealedBlock] to [ExecutionPayloadInputV2] pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayloadInputV2 { - let withdraw = value.withdrawals.clone().map(|withdrawals| { - withdrawals.into_iter().map(convert_withdrawal_to_standalone_withdraw).collect::>() - }); ExecutionPayloadInputV2 { - withdrawals: withdraw, + withdrawals: value.withdrawals.clone().map(Withdrawals::into_inner), execution_payload: try_block_to_payload_v1(value), } } @@ -295,30 +273,6 @@ pub fn validate_block_hash( Ok(sealed_block) } -/// Converts [Withdrawal] to [reth_rpc_types::Withdrawal] -pub fn convert_withdrawal_to_standalone_withdraw( - withdrawal: Withdrawal, -) -> reth_rpc_types::Withdrawal { - reth_rpc_types::Withdrawal { - index: withdrawal.index, - validator_index: withdrawal.validator_index, - address: withdrawal.address, - amount: withdrawal.amount, - } -} - -/// Converts [reth_rpc_types::Withdrawal] to [Withdrawal] -pub fn convert_standalone_withdraw_to_withdrawal( - standalone: reth_rpc_types::Withdrawal, -) -> Withdrawal { - Withdrawal { - index: standalone.index, 
- validator_index: standalone.validator_index, - address: standalone.address, - amount: standalone.amount, - } -} - /// Converts [Block] to [ExecutionPayloadBodyV1] pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { let transactions = value.body.into_iter().map(|tx| { @@ -326,10 +280,10 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { tx.encode_enveloped(&mut out); out.into() }); - let withdraw: Option> = value.withdrawals.map(|withdrawals| { - withdrawals.into_iter().map(convert_withdrawal_to_standalone_withdraw).collect::>() - }); - ExecutionPayloadBodyV1 { transactions: transactions.collect(), withdrawals: withdraw } + ExecutionPayloadBodyV1 { + transactions: transactions.collect(), + withdrawals: value.withdrawals.map(Withdrawals::into_inner), + } } /// Transforms a [SealedBlock] into a [ExecutionPayloadV1] From 2b6921b1614f1dc5f63211b5516e393ea3496258 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 27 Apr 2024 17:10:26 +0200 Subject: [PATCH 089/250] chore: replace fnv with fxhashmap (#7927) --- Cargo.lock | 2 +- Cargo.toml | 1 + crates/storage/db/Cargo.toml | 2 +- crates/transaction-pool/Cargo.toml | 2 +- crates/transaction-pool/src/identifier.rs | 4 ++-- crates/transaction-pool/src/pool/parked.rs | 4 ++-- crates/transaction-pool/src/pool/txpool.rs | 6 +++--- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 478d38713..3c1b2b70f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7826,7 +7826,6 @@ dependencies = [ "auto_impl", "bitflags 2.5.0", "criterion", - "fnv", "futures-util", "itertools 0.12.1", "metrics", @@ -7844,6 +7843,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "revm", + "rustc-hash", "schnellru", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index 478c2b453..e66aa3413 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -321,6 +321,7 @@ serde_with = "3.3.0" humantime = "2.1" humantime-serde = "1.1" rand = "0.8.5" +rustc-hash = "1.1.0" 
schnellru = "0.2" strum = "0.26" rayon = "1.7" diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 461a84f3e..f816cc2c0 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -37,7 +37,7 @@ tempfile = { workspace = true, optional = true } derive_more.workspace = true eyre.workspace = true paste.workspace = true -rustc-hash = "1.1.0" +rustc-hash.workspace = true # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 4365245cf..ebb6e497f 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -36,9 +36,9 @@ metrics.workspace = true aquamarine.workspace = true thiserror.workspace = true tracing.workspace = true +rustc-hash.workspace = true schnellru.workspace = true serde = { workspace = true, features = ["derive", "rc"], optional = true } -fnv = "1.0.7" bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 6ec1527bd..4e4bec4d1 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -1,5 +1,5 @@ -use fnv::FnvHashMap; use reth_primitives::Address; +use rustc_hash::FxHashMap; use std::collections::HashMap; /// An internal mapping of addresses. @@ -13,7 +13,7 @@ pub(crate) struct SenderIdentifiers { /// Assigned `SenderId` for an `Address`. address_to_id: HashMap, /// Reverse mapping of `SenderId` to `Address`. 
- sender_to_address: FnvHashMap, + sender_to_address: FxHashMap, } impl SenderIdentifiers { diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 2815deaee..ef0766bed 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -3,7 +3,7 @@ use crate::{ pool::size::SizeTracker, PoolTransaction, SubPoolLimit, ValidPoolTransaction, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, }; -use fnv::FnvHashMap; +use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ cmp::Ordering, @@ -40,7 +40,7 @@ pub struct ParkedPool { last_sender_submission: BTreeSet, /// Keeps track of the number of transactions in the pool by the sender and the last submission /// id. - sender_transaction_count: FnvHashMap, + sender_transaction_count: FxHashMap, /// Keeps track of the size of this pool. /// /// See also [`PoolTransaction::size`]. diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index cdd897448..44a90f1cf 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -18,7 +18,6 @@ use crate::{ PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; -use fnv::FnvHashMap; use itertools::Itertools; use reth_primitives::{ constants::{ @@ -26,6 +25,7 @@ use reth_primitives::{ }, Address, TxHash, B256, }; +use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ cmp::Ordering, @@ -44,7 +44,7 @@ use tracing::trace; /// include_mmd!("docs/mermaid/txpool.mmd") pub struct TxPool { /// Contains the currently known information about the senders. - sender_info: FnvHashMap, + sender_info: FxHashMap, /// pending subpool /// /// Holds transactions that are ready to be executed on the current state. @@ -903,7 +903,7 @@ pub(crate) struct AllTransactions { /// _All_ transaction in the pool sorted by their sender and nonce pair. 
txs: BTreeMap>, /// Tracks the number of transactions by sender that are currently in the pool. - tx_counter: FnvHashMap, + tx_counter: FxHashMap, /// The current block number the pool keeps track of. last_seen_block_number: u64, /// The current block hash the pool keeps track of. From b3bac08f6849de639fbff353f8f0daab1eb5b0ee Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 28 Apr 2024 06:11:58 +0200 Subject: [PATCH 090/250] chore(deps): weekly `cargo update` (#7937) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 72 ++++++++++++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c1b2b70f..e14f5db80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -160,7 +160,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] @@ -492,7 +492,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c8d6e74e4feeaa2bcfdecfd3da247ab53c67bd654ba1907270c32e02b142331" dependencies = [ - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] @@ -824,7 +824,7 @@ checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" dependencies = [ "concurrent-queue", "event-listener 5.3.0", - "event-listener-strategy 0.5.1", + "event-listener-strategy 0.5.2", "futures-core", "pin-project-lite", ] @@ -894,9 +894,9 @@ dependencies = [ [[package]] name = "async-task" -version = "4.7.0" +version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" @@ -959,7 +959,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0" dependencies = [ - "fastrand 2.0.2", + "fastrand 2.1.0", "futures-core", "pin-project", "tokio", @@ -1164,18 +1164,16 @@ dependencies = [ [[package]] name = "blocking" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" dependencies = [ "async-channel 2.2.1", "async-lock", "async-task", - "fastrand 2.0.2", "futures-io", "futures-lite 2.3.0", "piper", - "tracing", ] [[package]] @@ -1711,9 +1709,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -2839,9 +2837,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332f51cb23d20b0de8458b86580878211da09bcd4503cb579c225b3d124cabb3" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ "event-listener 5.3.0", "pin-project-lite", @@ -2945,9 +2943,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -3013,9 +3011,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" dependencies = [ "crc32fast", "miniz_oxide", @@ -3660,7 +3658,7 @@ dependencies = [ "http 0.2.12", "hyper 0.14.28", "log", - "rustls 0.21.11", + "rustls 0.21.12", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", @@ -5578,7 +5576,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" dependencies = [ "atomic-waker", - "fastrand 2.0.2", + "fastrand 2.1.0", "futures-io", ] @@ -8198,9 +8196,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.11" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -8486,9 +8484,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.198" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" +checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" dependencies = [ "serde_derive", ] @@ -8504,9 +8502,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.198" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" +checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" dependencies = [ "proc-macro2", "quote", @@ -8589,9 +8587,9 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"adb86f9315df5df6a70eae0cc22395a44e544a0d8897586820770a35ede74449" +checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" dependencies = [ "futures", "log", @@ -8603,9 +8601,9 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9bb72430492e9549b0c4596725c0f82729bff861c45aa8099c0a8e67fc3b721" +checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", @@ -9091,7 +9089,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.2", + "fastrand 2.1.0", "rustix", "windows-sys 0.52.0", ] @@ -9358,7 +9356,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.11", + "rustls 0.21.12", "tokio", ] @@ -9454,7 +9452,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] @@ -9843,9 +9841,9 @@ checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "universal-hash" @@ -10320,9 +10318,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" +checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" dependencies = [ 
"memchr", ] From bf66a3dd27870e39ed4a99d1bbc6675e7bb4ff4d Mon Sep 17 00:00:00 2001 From: Elijah Hampton Date: Sun, 28 Apr 2024 05:44:18 -0400 Subject: [PATCH 091/250] Move network.rs example to its own folder (#7936) Co-authored-by: Elijah Hampton --- Cargo.lock | 34 ++++++------------ Cargo.toml | 2 +- examples/Cargo.toml | 30 ---------------- examples/README.md | 36 ++++++++++---------- examples/network/Cargo.toml | 13 +++++++ examples/{network.rs => network/src/main.rs} | 2 +- 6 files changed, 44 insertions(+), 73 deletions(-) delete mode 100644 examples/Cargo.toml create mode 100644 examples/network/Cargo.toml rename examples/{network.rs => network/src/main.rs} (96%) diff --git a/Cargo.lock b/Cargo.lock index e14f5db80..50fa59f01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2845,29 +2845,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "examples" -version = "0.0.0" -dependencies = [ - "async-trait", - "eyre", - "futures", - "reth-beacon-consensus", - "reth-blockchain-tree", - "reth-db", - "reth-network", - "reth-network-api", - "reth-primitives", - "reth-provider", - "reth-revm", - "reth-rpc-builder", - "reth-rpc-types", - "reth-rpc-types-compat", - "reth-tasks", - "reth-transaction-pool", - "tokio", -] - [[package]] name = "exex-minimal" version = "0.0.0" @@ -5066,6 +5043,17 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "network" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth-network", + "reth-provider", + "tokio", +] + [[package]] name = "network-txpool" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index e66aa3413..7edc96312 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,7 +72,6 @@ members = [ "crates/transaction-pool/", "crates/trie/", "crates/trie-parallel/", - "examples/", "examples/node-custom-rpc/", "examples/beacon-api-sse/", "examples/node-event-hooks/", @@ -82,6 +81,7 @@ members = [ "examples/custom-dev-node/", "examples/custom-payload-builder/", "examples/manual-p2p/", + 
"examples/network/", "examples/network-txpool/", "examples/rpc-db/", "examples/txpool-tracing/", diff --git a/examples/Cargo.toml b/examples/Cargo.toml deleted file mode 100644 index 02c571786..000000000 --- a/examples/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "examples" -version = "0.0.0" -publish = false -edition.workspace = true -license.workspace = true - -[dev-dependencies] -reth-primitives.workspace = true -reth-db.workspace = true -reth-provider.workspace = true -reth-rpc-builder.workspace = true -reth-rpc-types.workspace = true -reth-rpc-types-compat.workspace = true -reth-revm.workspace = true -reth-blockchain-tree.workspace = true -reth-beacon-consensus.workspace = true -reth-network-api.workspace = true -reth-network.workspace = true -reth-transaction-pool.workspace = true -reth-tasks.workspace = true - -eyre.workspace = true -futures.workspace = true -async-trait.workspace = true -tokio.workspace = true - -[[example]] -name = "network" -path = "network.rs" \ No newline at end of file diff --git a/examples/README.md b/examples/README.md index 574efe961..ea2c87c1b 100644 --- a/examples/README.md +++ b/examples/README.md @@ -10,16 +10,16 @@ to make a PR! 
## Node Builder -| Example | Description | -|---------------------------------------------------------------| ------------------------------------------------------------------------------------------------ | -| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | -| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | -| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | -| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | -| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | -| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | -| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | -| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | +| Example | Description | +| -------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | +| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | +| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | +| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | +| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | +| 
[Custom node components](./custom-node-components) | Illustrates how to configure custom node components | +| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | ## ExEx @@ -36,22 +36,22 @@ to make a PR! ## Database -| Example | Description | -| --------------------------- | --------------------------------------------------------------- | +| Example | Description | +| ------------------------ | --------------------------------------------------------------- | | [DB access](./db-access) | Illustrates how to access Reth's database in a separate process | ## Network -| Example | Description | -| ---------------------------------- | ------------------------------------------------------------ | -| [Standalone network](./network.rs) | Illustrates how to use the network as a standalone component | +| Example | Description | +| ------------------------------- | ------------------------------------------------------------ | +| [Standalone network](./network) | Illustrates how to use the network as a standalone component | ## Mempool -| Example | Description | -|------------------------------------------------------| -------------------------------------------------------------------------------------------------------------------------- | +| Example | Description | +| ---------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | | [Trace pending transactions](./txpool-tracing) | Illustrates how to trace pending transactions as they arrive in the mempool | -| [Standalone txpool](./network-txpool) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | +| [Standalone txpool](./network-txpool) | Illustrates how to use the network as a standalone component together with a transaction pool with a custom pool validator | ## P2P diff --git 
a/examples/network/Cargo.toml b/examples/network/Cargo.toml new file mode 100644 index 000000000..b3b740dd8 --- /dev/null +++ b/examples/network/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "network" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth-network.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +futures.workspace = true +tokio.workspace = true +eyre.workspace = true \ No newline at end of file diff --git a/examples/network.rs b/examples/network/src/main.rs similarity index 96% rename from examples/network.rs rename to examples/network/src/main.rs index 18bf5cbcf..16482ca1f 100644 --- a/examples/network.rs +++ b/examples/network/src/main.rs @@ -3,7 +3,7 @@ //! Run with //! //! ```not_rust -//! cargo run --example network +//! cargo run --release -p network //! ``` use futures::StreamExt; From 1e94d9007e4761fd40e25b0a98aeb6fb77c7b912 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 28 Apr 2024 12:20:30 +0200 Subject: [PATCH 092/250] chore: rm redunant withdrawal fn (#7940) --- crates/rpc/rpc-types-compat/src/block.rs | 25 ++++++------------------ 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 8c82686f9..b342f8a30 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -3,7 +3,7 @@ use crate::transaction::from_recovered_with_block_context; use alloy_rlp::Encodable; use reth_primitives::{ - Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, B256, U256, + Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, Withdrawals, B256, U256, }; use reth_rpc_types::{Block, BlockError, BlockTransactions, BlockTransactionsKind, Header}; @@ -144,17 +144,6 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) } } -fn from_primitive_withdrawal( - 
withdrawal: reth_primitives::Withdrawal, -) -> reth_rpc_types::Withdrawal { - reth_rpc_types::Withdrawal { - index: withdrawal.index, - address: withdrawal.address, - validator_index: withdrawal.validator_index, - amount: withdrawal.amount, - } -} - #[inline] fn from_block_with_transactions( block_length: usize, @@ -167,13 +156,11 @@ fn from_block_with_transactions( let mut header = from_primitive_with_hash(block.header.seal(block_hash)); header.total_difficulty = Some(total_difficulty); - let withdrawals = if header.withdrawals_root.is_some() { - block - .withdrawals - .map(|withdrawals| withdrawals.into_iter().map(from_primitive_withdrawal).collect()) - } else { - None - }; + let withdrawals = header + .withdrawals_root + .is_some() + .then(|| block.withdrawals.map(Withdrawals::into_inner)) + .flatten(); Block { header, From e18869f25b960feed2c5c776724dcbee7578731b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 28 Apr 2024 12:23:43 +0200 Subject: [PATCH 093/250] chore: rm redunant log fn (#7941) --- crates/rpc/rpc-types-compat/src/lib.rs | 1 - crates/rpc/rpc-types-compat/src/log.rs | 16 ---------------- 2 files changed, 17 deletions(-) delete mode 100644 crates/rpc/rpc-types-compat/src/log.rs diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index 7aabf4323..99eff4fa7 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -12,6 +12,5 @@ pub mod block; pub mod engine; -pub mod log; pub mod proof; pub mod transaction; diff --git a/crates/rpc/rpc-types-compat/src/log.rs b/crates/rpc/rpc-types-compat/src/log.rs deleted file mode 100644 index 2b6d33c42..000000000 --- a/crates/rpc/rpc-types-compat/src/log.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Compatibility functions for rpc `Log` type. 
- -/// Creates a new rpc Log from a primitive log type from DB -#[inline] -pub fn from_primitive_log(log: reth_primitives::Log) -> reth_rpc_types::Log { - reth_rpc_types::Log { - inner: log, - block_hash: None, - block_number: None, - block_timestamp: None, - transaction_hash: None, - transaction_index: None, - log_index: None, - removed: false, - } -} From f3ba80093b3eae1a7ca0b2df84b4aca9d3a928f9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 28 Apr 2024 12:32:32 +0200 Subject: [PATCH 094/250] chore: simplify accesslist extraction (#7942) --- .../rpc-types-compat/src/transaction/mod.rs | 78 ++----------------- 1 file changed, 8 insertions(+), 70 deletions(-) diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index d0f4672a2..a441c4c29 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -1,13 +1,10 @@ //! Compatibility functions for rpc `Transaction` type. 
-use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; +use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; use reth_primitives::{ - BlockNumber, Transaction as PrimitiveTransaction, TransactionSignedEcRecovered, - TxKind as PrimitiveTransactionKind, TxType, B256, + BlockNumber, TransactionSignedEcRecovered, TxKind as PrimitiveTransactionKind, TxType, B256, }; -#[cfg(feature = "optimism")] -use reth_rpc_types::optimism::OptimismTransactionFields; -use reth_rpc_types::{AccessList, AccessListItem, Transaction}; +use reth_rpc_types::Transaction; use signature::from_primitive_signature; pub use typed::*; @@ -45,7 +42,7 @@ fn fill( transaction_index: Option, ) -> Transaction { let signer = tx.signer(); - let mut signed_tx = tx.into_signed(); + let signed_tx = tx.into_signed(); let to = match signed_tx.kind() { PrimitiveTransactionKind::Create => None, @@ -77,51 +74,8 @@ fn fill( // let chain_id = signed_tx.chain_id().map(U64::from); let chain_id = signed_tx.chain_id(); - let mut blob_versioned_hashes = None; - - #[allow(unreachable_patterns)] - let access_list = match &mut signed_tx.transaction { - PrimitiveTransaction::Legacy(_) => None, - PrimitiveTransaction::Eip2930(tx) => Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )), - PrimitiveTransaction::Eip1559(tx) => Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )), - PrimitiveTransaction::Eip4844(tx) => { - // extract the blob hashes from the transaction - blob_versioned_hashes = Some(std::mem::take(&mut tx.blob_versioned_hashes)); - - Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: 
item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )) - } - _ => { - // OP deposit tx - None - } - }; + let blob_versioned_hashes = signed_tx.blob_versioned_hashes(); + let access_list = signed_tx.access_list().cloned(); let signature = from_primitive_signature(*signed_tx.signature(), signed_tx.tx_type(), signed_tx.chain_id()); @@ -151,7 +105,7 @@ fn fill( blob_versioned_hashes, // Optimism fields #[cfg(feature = "optimism")] - other: OptimismTransactionFields { + other: reth_rpc_types::optimism::OptimismTransactionFields { source_hash: signed_tx.source_hash(), mint: signed_tx.mint().map(reth_primitives::U128::from), is_system_tx: signed_tx.is_deposit().then_some(signed_tx.is_system_transaction()), @@ -162,22 +116,6 @@ fn fill( } } -/// Convert [reth_primitives::AccessList] to [reth_rpc_types::AccessList] -pub fn from_primitive_access_list( - access_list: reth_primitives::AccessList, -) -> reth_rpc_types::AccessList { - reth_rpc_types::AccessList( - access_list - .0 - .into_iter() - .map(|item| reth_rpc_types::AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.into_iter().map(|key| key.0.into()).collect(), - }) - .collect(), - ) -} - /// Convert [TransactionSignedEcRecovered] to [TransactionRequest] pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { let from = tx.signer(); @@ -187,7 +125,7 @@ pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> Transact let input = tx.transaction.input().clone(); let nonce = tx.transaction.nonce(); let chain_id = tx.transaction.chain_id(); - let access_list = tx.transaction.access_list().cloned().map(from_primitive_access_list); + let access_list = tx.transaction.access_list().cloned(); let max_fee_per_blob_gas = tx.transaction.max_fee_per_blob_gas(); let blob_versioned_hashes = tx.transaction.blob_versioned_hashes(); let tx_type = tx.transaction.tx_type(); From c535d59c8dada6029c9036560eadb5274e8fcd2e Mon 
Sep 17 00:00:00 2001 From: Qiwei Yang Date: Sun, 28 Apr 2024 21:15:35 +0800 Subject: [PATCH 095/250] refactor: extract GenesisAllocator type from primitives (#7932) Co-authored-by: Matthias Seitz --- Cargo.lock | 10 + Cargo.toml | 2 + crates/consensus/beacon/Cargo.toml | 1 + crates/consensus/beacon/src/engine/mod.rs | 6 +- crates/primitives/src/genesis.rs | 209 ------------------ testing/testing-utils/Cargo.toml | 17 ++ .../testing-utils/src/genesis_allocator.rs | 204 +++++++++++++++++ testing/testing-utils/src/lib.rs | 12 + 8 files changed, 248 insertions(+), 213 deletions(-) create mode 100644 testing/testing-utils/Cargo.toml create mode 100644 testing/testing-utils/src/genesis_allocator.rs create mode 100644 testing/testing-utils/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 50fa59f01..54db17110 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6395,6 +6395,7 @@ dependencies = [ "reth-stages-api", "reth-static-file", "reth-tasks", + "reth-testing-utils", "reth-tokio-util", "reth-tracing", "schnellru", @@ -7780,6 +7781,15 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "reth-testing-utils" +version = "0.2.0-beta.6" +dependencies = [ + "alloy-genesis", + "reth-primitives", + "secp256k1", +] + [[package]] name = "reth-tokio-util" version = "0.2.0-beta.6" diff --git a/Cargo.toml b/Cargo.toml index 7edc96312..c970bf6a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,6 +91,7 @@ members = [ "examples/exex/op-bridge/", "examples/db-access", "testing/ef-tests/", + "testing/testing-utils", ] default-members = ["bin/reth"] @@ -275,6 +276,7 @@ reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } reth-optimism-consensus = { path = "crates/optimism/consensus" } reth-node-events = { path = "crates/node/events" } +reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } diff --git a/crates/consensus/beacon/Cargo.toml 
b/crates/consensus/beacon/Cargo.toml index 38dd772af..4e35d06f0 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -59,6 +59,7 @@ reth-downloaders.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-config.workspace = true +reth-testing-utils.workspace = true assert_matches.workspace = true diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 72fc97297..428b95c0b 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -2350,11 +2350,9 @@ mod tests { use super::*; use reth_db::test_utils::create_test_static_files_dir; use reth_interfaces::test_utils::generators::random_block; - use reth_primitives::{ - genesis::{Genesis, GenesisAllocator}, - Hardfork, U256, - }; + use reth_primitives::{genesis::Genesis, Hardfork, U256}; use reth_provider::test_utils::blocks::BlockchainTestData; + use reth_testing_utils::GenesisAllocator; #[tokio::test] async fn new_payload_before_forkchoice() { diff --git a/crates/primitives/src/genesis.rs b/crates/primitives/src/genesis.rs index 991b01bd7..c81d9488d 100644 --- a/crates/primitives/src/genesis.rs +++ b/crates/primitives/src/genesis.rs @@ -3,212 +3,3 @@ // re-export genesis types #[doc(inline)] pub use alloy_genesis::*; - -#[cfg(any(test, feature = "test-utils"))] -pub use allocator::GenesisAllocator; - -#[cfg(any(test, feature = "test-utils"))] -mod allocator { - use crate::{public_key_to_address, Address, Bytes, B256, U256}; - use alloy_genesis::GenesisAccount; - use secp256k1::{ - rand::{thread_rng, RngCore}, - Keypair, Secp256k1, - }; - use std::collections::{hash_map::Entry, BTreeMap, HashMap}; - - /// This helps create a custom genesis alloc by making it easy to add funded accounts with known - /// signers to the genesis block. 
- /// - /// # Example - /// ``` - /// # use reth_primitives::{ genesis::GenesisAllocator, Address, U256, hex, Bytes}; - /// # use std::str::FromStr; - /// let mut allocator = GenesisAllocator::default(); - /// - /// // This will add a genesis account to the alloc builder, with the provided balance. The - /// // signer for the account will be returned. - /// let (_signer, _addr) = allocator.new_funded_account(U256::from(100_000_000_000_000_000u128)); - /// - /// // You can also provide code for the account. - /// let code = Bytes::from_str("0x1234").unwrap(); - /// let (_second_signer, _second_addr) = - /// allocator.new_funded_account_with_code(U256::from(100_000_000_000_000_000u128), code); - /// - /// // You can also add an account with a specific address. - /// // This will not return a signer, since the address is provided by the user and the signer - /// // may be unknown. - /// let addr = "0Ac1dF02185025F65202660F8167210A80dD5086".parse::
().unwrap(); - /// allocator.add_funded_account_with_address(addr, U256::from(100_000_000_000_000_000u128)); - /// - /// // Once you're done adding accounts, you can build the alloc. - /// let alloc = allocator.build(); - /// ``` - #[derive(Debug)] - pub struct GenesisAllocator<'a> { - /// The genesis alloc to be built. - alloc: HashMap, - /// The rng to use for generating key pairs. - rng: Box, - } - - impl<'a> GenesisAllocator<'a> { - /// Initialize a new alloc builder with the provided rng. - pub fn new_with_rng(rng: &'a mut R) -> Self - where - R: RngCore + std::fmt::Debug, - { - Self { alloc: HashMap::default(), rng: Box::new(rng) } - } - - /// Use the provided rng for generating key pairs. - pub fn with_rng(mut self, rng: &'a mut R) -> Self - where - R: RngCore + std::fmt::Debug, - { - self.rng = Box::new(rng); - self - } - - /// Add a funded account to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_funded_account(&mut self, balance: U256) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); - - (pair, address) - } - - /// Add a funded account to the genesis alloc with the provided code. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_funded_account_with_code( - &mut self, - balance: U256, - code: Bytes, - ) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_balance(balance).with_code(Some(code)), - ); - - (pair, address) - } - - /// Adds a funded account to the genesis alloc with the provided storage. - /// - /// Returns the key pair for the account and the account's address. 
- pub fn new_funded_account_with_storage( - &mut self, - balance: U256, - storage: BTreeMap, - ) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_balance(balance).with_storage(Some(storage)), - ); - - (pair, address) - } - - /// Adds an account with code and storage to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_account_with_code_and_storage( - &mut self, - code: Bytes, - storage: BTreeMap, - ) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_code(Some(code)).with_storage(Some(storage)), - ); - - (pair, address) - } - - /// Adds an account with code to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_account_with_code(&mut self, code: Bytes) -> (Keypair, Address) { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, GenesisAccount::default().with_code(Some(code))); - - (pair, address) - } - - /// Add a funded account to the genesis alloc with the provided address. - /// - /// Neither the key pair nor the account will be returned, since the address is provided by - /// the user and the signer may be unknown. - pub fn add_funded_account_with_address(&mut self, address: Address, balance: U256) { - self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); - } - - /// Adds the given [GenesisAccount] to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. 
- pub fn add_account(&mut self, account: GenesisAccount) -> Address { - let secp = Secp256k1::new(); - let pair = Keypair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, account); - - address - } - - /// Gets the account for the provided address. - /// - /// If it does not exist, this returns `None`. - pub fn get_account(&self, address: &Address) -> Option<&GenesisAccount> { - self.alloc.get(address) - } - - /// Gets a mutable version of the account for the provided address, if it exists. - pub fn get_account_mut(&mut self, address: &Address) -> Option<&mut GenesisAccount> { - self.alloc.get_mut(address) - } - - /// Gets an [Entry] for the provided address. - pub fn account_entry(&mut self, address: Address) -> Entry<'_, Address, GenesisAccount> { - self.alloc.entry(address) - } - - /// Build the genesis alloc. - pub fn build(self) -> HashMap { - self.alloc - } - } - - impl Default for GenesisAllocator<'_> { - fn default() -> Self { - Self { alloc: HashMap::default(), rng: Box::new(thread_rng()) } - } - } - - /// Helper trait that encapsulates [RngCore], and [Debug](std::fmt::Debug) to get around rules - /// for auto traits (Opt-in built-in traits). - trait RngDebug: RngCore + std::fmt::Debug {} - - impl RngDebug for T where T: RngCore + std::fmt::Debug {} -} diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml new file mode 100644 index 000000000..97a4c78df --- /dev/null +++ b/testing/testing-utils/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "reth-testing-utils" +description = "Testing utils for reth." 
+version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +secp256k1.workspace = true +alloy-genesis.workspace = true +reth-primitives.workspace = true diff --git a/testing/testing-utils/src/genesis_allocator.rs b/testing/testing-utils/src/genesis_allocator.rs new file mode 100644 index 000000000..33b7188d1 --- /dev/null +++ b/testing/testing-utils/src/genesis_allocator.rs @@ -0,0 +1,204 @@ +//! Helps create a custom genesis alloc by making it easy to add funded accounts with known +//! signers to the genesis block. + +use alloy_genesis::GenesisAccount; +use reth_primitives::{public_key_to_address, Address, Bytes, B256, U256}; +use secp256k1::{ + rand::{thread_rng, RngCore}, + Keypair, Secp256k1, +}; +use std::collections::{hash_map::Entry, BTreeMap, HashMap}; + +/// This helps create a custom genesis alloc by making it easy to add funded accounts with known +/// signers to the genesis block. +/// +/// # Example +/// ``` +/// # use reth_primitives::{Address, U256, hex, Bytes}; +/// # use reth_testing_utils::GenesisAllocator; +/// # use std::str::FromStr; +/// let mut allocator = GenesisAllocator::default(); +/// +/// // This will add a genesis account to the alloc builder, with the provided balance. The +/// // signer for the account will be returned. +/// let (_signer, _addr) = allocator.new_funded_account(U256::from(100_000_000_000_000_000u128)); +/// +/// // You can also provide code for the account. +/// let code = Bytes::from_str("0x1234").unwrap(); +/// let (_second_signer, _second_addr) = +/// allocator.new_funded_account_with_code(U256::from(100_000_000_000_000_000u128), code); +/// +/// // You can also add an account with a specific address. +/// // This will not return a signer, since the address is provided by the user and the signer +/// // may be unknown. 
+/// let addr = "0Ac1dF02185025F65202660F8167210A80dD5086".parse::
().unwrap(); +/// allocator.add_funded_account_with_address(addr, U256::from(100_000_000_000_000_000u128)); +/// +/// // Once you're done adding accounts, you can build the alloc. +/// let alloc = allocator.build(); +/// ``` +#[derive(Debug)] +pub struct GenesisAllocator<'a> { + /// The genesis alloc to be built. + alloc: HashMap, + /// The rng to use for generating key pairs. + rng: Box, +} + +impl<'a> GenesisAllocator<'a> { + /// Initialize a new alloc builder with the provided rng. + pub fn new_with_rng(rng: &'a mut R) -> Self + where + R: RngCore + std::fmt::Debug, + { + Self { alloc: HashMap::default(), rng: Box::new(rng) } + } + + /// Use the provided rng for generating key pairs. + pub fn with_rng(mut self, rng: &'a mut R) -> Self + where + R: RngCore + std::fmt::Debug, + { + self.rng = Box::new(rng); + self + } + + /// Add a funded account to the genesis alloc. + /// + /// Returns the key pair for the account and the account's address. + pub fn new_funded_account(&mut self, balance: U256) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); + + (pair, address) + } + + /// Add a funded account to the genesis alloc with the provided code. + /// + /// Returns the key pair for the account and the account's address. + pub fn new_funded_account_with_code( + &mut self, + balance: U256, + code: Bytes, + ) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc + .insert(address, GenesisAccount::default().with_balance(balance).with_code(Some(code))); + + (pair, address) + } + + /// Adds a funded account to the genesis alloc with the provided storage. + /// + /// Returns the key pair for the account and the account's address. 
+ pub fn new_funded_account_with_storage( + &mut self, + balance: U256, + storage: BTreeMap, + ) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert( + address, + GenesisAccount::default().with_balance(balance).with_storage(Some(storage)), + ); + + (pair, address) + } + + /// Adds an account with code and storage to the genesis alloc. + /// + /// Returns the key pair for the account and the account's address. + pub fn new_account_with_code_and_storage( + &mut self, + code: Bytes, + storage: BTreeMap, + ) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert( + address, + GenesisAccount::default().with_code(Some(code)).with_storage(Some(storage)), + ); + + (pair, address) + } + + /// Adds an account with code to the genesis alloc. + /// + /// Returns the key pair for the account and the account's address. + pub fn new_account_with_code(&mut self, code: Bytes) -> (Keypair, Address) { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert(address, GenesisAccount::default().with_code(Some(code))); + + (pair, address) + } + + /// Add a funded account to the genesis alloc with the provided address. + /// + /// Neither the key pair nor the account will be returned, since the address is provided by + /// the user and the signer may be unknown. + pub fn add_funded_account_with_address(&mut self, address: Address, balance: U256) { + self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); + } + + /// Adds the given [GenesisAccount] to the genesis alloc. + /// + /// Returns the key pair for the account and the account's address. 
+ pub fn add_account(&mut self, account: GenesisAccount) -> Address { + let secp = Secp256k1::new(); + let pair = Keypair::new(&secp, &mut self.rng); + let address = public_key_to_address(pair.public_key()); + + self.alloc.insert(address, account); + + address + } + + /// Gets the account for the provided address. + /// + /// If it does not exist, this returns `None`. + pub fn get_account(&self, address: &Address) -> Option<&GenesisAccount> { + self.alloc.get(address) + } + + /// Gets a mutable version of the account for the provided address, if it exists. + pub fn get_account_mut(&mut self, address: &Address) -> Option<&mut GenesisAccount> { + self.alloc.get_mut(address) + } + + /// Gets an [Entry] for the provided address. + pub fn account_entry(&mut self, address: Address) -> Entry<'_, Address, GenesisAccount> { + self.alloc.entry(address) + } + + /// Build the genesis alloc. + pub fn build(self) -> HashMap { + self.alloc + } +} + +impl Default for GenesisAllocator<'_> { + fn default() -> Self { + Self { alloc: HashMap::default(), rng: Box::new(thread_rng()) } + } +} + +/// Helper trait that encapsulates [RngCore], and [Debug](std::fmt::Debug) to get around rules +/// for auto traits (Opt-in built-in traits). +trait RngDebug: RngCore + std::fmt::Debug {} + +impl RngDebug for T where T: RngCore + std::fmt::Debug {} diff --git a/testing/testing-utils/src/lib.rs b/testing/testing-utils/src/lib.rs new file mode 100644 index 000000000..0cf98c6ff --- /dev/null +++ b/testing/testing-utils/src/lib.rs @@ -0,0 +1,12 @@ +//! Testing utilities. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod genesis_allocator; + +pub use genesis_allocator::GenesisAllocator; From ead0fbf8fc21b5983aa70f0e893d1c6254aa7995 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 28 Apr 2024 16:40:36 +0200 Subject: [PATCH 096/250] chore: rm anyhow dep (#7944) --- Cargo.lock | 1 - crates/primitives/Cargo.toml | 1 - crates/primitives/benches/integer_list.rs | 7 ++++--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54db17110..d20f04fa8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7389,7 +7389,6 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-trie", - "anyhow", "arbitrary", "assert_matches", "byteorder", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 4fa504665..deaee2300 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -81,7 +81,6 @@ hash-db = "~0.15" plain_hasher = "0.2" sucds = "0.8.1" -anyhow = "1.0.75" # necessary so we don't hit a "undeclared 'std'": # https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 diff --git a/crates/primitives/benches/integer_list.rs b/crates/primitives/benches/integer_list.rs index 3945d48c9..56b0e9e38 100644 --- a/crates/primitives/benches/integer_list.rs +++ b/crates/primitives/benches/integer_list.rs @@ -121,7 +121,8 @@ mod elias_fano { let mut builder = EliasFanoBuilder::new( list.as_ref().iter().max().map_or(0, |max| max + 1), list.as_ref().len(), - )?; + ) + .map_err(|err| EliasFanoError::InvalidInput(err.to_string()))?; builder.extend(list.as_ref().iter().copied()); Ok(Self(builder.build())) } @@ -241,8 +242,8 @@ mod elias_fano { #[derive(Debug, thiserror::Error)] pub enum 
EliasFanoError { /// The provided input is invalid. - #[error(transparent)] - InvalidInput(#[from] anyhow::Error), + #[error("{0}")] + InvalidInput(String), /// Failed to deserialize data into type. #[error("failed to deserialize data into type")] FailedDeserialize, From cf6d34cf3da62704ebcbfe74d953249727c3d069 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Sun, 28 Apr 2024 09:18:46 -0600 Subject: [PATCH 097/250] expose `PayloadTaskGuard` from `reth_basic_payload_builder` (#7945) --- crates/payload/basic/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 7903dfa8d..c89e8b949 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -226,12 +226,13 @@ pub struct PrecachedState { /// Restricts how many generator tasks can be executed at once. #[derive(Debug, Clone)] -struct PayloadTaskGuard(Arc); +pub struct PayloadTaskGuard(Arc); // === impl PayloadTaskGuard === impl PayloadTaskGuard { - fn new(max_payload_tasks: usize) -> Self { + /// Constructs `Self` with a maximum task count of `max_payload_tasks`. 
+ pub fn new(max_payload_tasks: usize) -> Self { Self(Arc::new(Semaphore::new(max_payload_tasks))) } } From 1f6753b84a3ea3f75a6bcb9134e03f7b286ea130 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 12:30:49 +0200 Subject: [PATCH 098/250] chore: mode node-builder to node dir (#7952) --- Cargo.toml | 4 ++-- crates/{node-builder => node/builder}/Cargo.toml | 0 crates/{node-builder => node/builder}/README.md | 0 .../{node-builder => node/builder}/docs/mermaid/builder.mmd | 0 crates/{node-builder => node/builder}/src/builder/mod.rs | 0 crates/{node-builder => node/builder}/src/builder/states.rs | 0 .../{node-builder => node/builder}/src/components/builder.rs | 0 crates/{node-builder => node/builder}/src/components/mod.rs | 0 .../{node-builder => node/builder}/src/components/network.rs | 0 .../{node-builder => node/builder}/src/components/payload.rs | 0 crates/{node-builder => node/builder}/src/components/pool.rs | 0 crates/{node-builder => node/builder}/src/exex.rs | 0 crates/{node-builder => node/builder}/src/handle.rs | 0 crates/{node-builder => node/builder}/src/hooks.rs | 0 crates/{node-builder => node/builder}/src/launch/common.rs | 0 crates/{node-builder => node/builder}/src/launch/mod.rs | 0 crates/{node-builder => node/builder}/src/lib.rs | 0 crates/{node-builder => node/builder}/src/node.rs | 0 crates/{node-builder => node/builder}/src/rpc.rs | 0 crates/{node-builder => node/builder}/src/setup.rs | 0 20 files changed, 2 insertions(+), 2 deletions(-) rename crates/{node-builder => node/builder}/Cargo.toml (100%) rename crates/{node-builder => node/builder}/README.md (100%) rename crates/{node-builder => node/builder}/docs/mermaid/builder.mmd (100%) rename crates/{node-builder => node/builder}/src/builder/mod.rs (100%) rename crates/{node-builder => node/builder}/src/builder/states.rs (100%) rename crates/{node-builder => node/builder}/src/components/builder.rs (100%) rename crates/{node-builder => node/builder}/src/components/mod.rs (100%) 
rename crates/{node-builder => node/builder}/src/components/network.rs (100%) rename crates/{node-builder => node/builder}/src/components/payload.rs (100%) rename crates/{node-builder => node/builder}/src/components/pool.rs (100%) rename crates/{node-builder => node/builder}/src/exex.rs (100%) rename crates/{node-builder => node/builder}/src/handle.rs (100%) rename crates/{node-builder => node/builder}/src/hooks.rs (100%) rename crates/{node-builder => node/builder}/src/launch/common.rs (100%) rename crates/{node-builder => node/builder}/src/launch/mod.rs (100%) rename crates/{node-builder => node/builder}/src/lib.rs (100%) rename crates/{node-builder => node/builder}/src/node.rs (100%) rename crates/{node-builder => node/builder}/src/rpc.rs (100%) rename crates/{node-builder => node/builder}/src/setup.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index c970bf6a5..70e36cd94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ members = [ "crates/engine-primitives/", "crates/ethereum/engine-primitives/", "crates/node-ethereum/", - "crates/node-builder/", + "crates/node/builder/", "crates/optimism/consensus", "crates/optimism/node/", "crates/optimism/evm/", @@ -223,7 +223,7 @@ reth-dns-discovery = { path = "crates/net/dns" } reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } -reth-node-builder = { path = "crates/node-builder" } +reth-node-builder = { path = "crates/node/builder" } reth-node-ethereum = { path = "crates/node-ethereum" } reth-node-optimism = { path = "crates/optimism/node" } reth-evm-optimism = { path = "crates/optimism/evm" } diff --git a/crates/node-builder/Cargo.toml b/crates/node/builder/Cargo.toml similarity index 100% rename from crates/node-builder/Cargo.toml rename to crates/node/builder/Cargo.toml diff --git a/crates/node-builder/README.md b/crates/node/builder/README.md similarity index 
100% rename from crates/node-builder/README.md rename to crates/node/builder/README.md diff --git a/crates/node-builder/docs/mermaid/builder.mmd b/crates/node/builder/docs/mermaid/builder.mmd similarity index 100% rename from crates/node-builder/docs/mermaid/builder.mmd rename to crates/node/builder/docs/mermaid/builder.mmd diff --git a/crates/node-builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs similarity index 100% rename from crates/node-builder/src/builder/mod.rs rename to crates/node/builder/src/builder/mod.rs diff --git a/crates/node-builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs similarity index 100% rename from crates/node-builder/src/builder/states.rs rename to crates/node/builder/src/builder/states.rs diff --git a/crates/node-builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs similarity index 100% rename from crates/node-builder/src/components/builder.rs rename to crates/node/builder/src/components/builder.rs diff --git a/crates/node-builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs similarity index 100% rename from crates/node-builder/src/components/mod.rs rename to crates/node/builder/src/components/mod.rs diff --git a/crates/node-builder/src/components/network.rs b/crates/node/builder/src/components/network.rs similarity index 100% rename from crates/node-builder/src/components/network.rs rename to crates/node/builder/src/components/network.rs diff --git a/crates/node-builder/src/components/payload.rs b/crates/node/builder/src/components/payload.rs similarity index 100% rename from crates/node-builder/src/components/payload.rs rename to crates/node/builder/src/components/payload.rs diff --git a/crates/node-builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs similarity index 100% rename from crates/node-builder/src/components/pool.rs rename to crates/node/builder/src/components/pool.rs diff --git a/crates/node-builder/src/exex.rs 
b/crates/node/builder/src/exex.rs similarity index 100% rename from crates/node-builder/src/exex.rs rename to crates/node/builder/src/exex.rs diff --git a/crates/node-builder/src/handle.rs b/crates/node/builder/src/handle.rs similarity index 100% rename from crates/node-builder/src/handle.rs rename to crates/node/builder/src/handle.rs diff --git a/crates/node-builder/src/hooks.rs b/crates/node/builder/src/hooks.rs similarity index 100% rename from crates/node-builder/src/hooks.rs rename to crates/node/builder/src/hooks.rs diff --git a/crates/node-builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs similarity index 100% rename from crates/node-builder/src/launch/common.rs rename to crates/node/builder/src/launch/common.rs diff --git a/crates/node-builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs similarity index 100% rename from crates/node-builder/src/launch/mod.rs rename to crates/node/builder/src/launch/mod.rs diff --git a/crates/node-builder/src/lib.rs b/crates/node/builder/src/lib.rs similarity index 100% rename from crates/node-builder/src/lib.rs rename to crates/node/builder/src/lib.rs diff --git a/crates/node-builder/src/node.rs b/crates/node/builder/src/node.rs similarity index 100% rename from crates/node-builder/src/node.rs rename to crates/node/builder/src/node.rs diff --git a/crates/node-builder/src/rpc.rs b/crates/node/builder/src/rpc.rs similarity index 100% rename from crates/node-builder/src/rpc.rs rename to crates/node/builder/src/rpc.rs diff --git a/crates/node-builder/src/setup.rs b/crates/node/builder/src/setup.rs similarity index 100% rename from crates/node-builder/src/setup.rs rename to crates/node/builder/src/setup.rs From 1fbcdeb065b2549bccb8b131abbd08d6a270df84 Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Mon, 29 Apr 2024 13:15:38 +0200 Subject: [PATCH 099/250] Tracking current stage on pipeline unwind (#6558) --- crates/node/events/src/node.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff 
--git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 32bf66e1b..2689226ea 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -217,6 +217,17 @@ impl NodeState { self.current_stage = None; } } + PipelineEvent::Unwind { stage_id, input } => { + let current_stage = CurrentStage { + stage_id, + eta: Eta::default(), + checkpoint: input.checkpoint, + target: Some(input.unwind_to), + entities_checkpoint: input.checkpoint.entities(), + }; + + self.current_stage = Some(current_stage); + } _ => (), } } From 0501a437110ef4b6ae3ca35b0270e1e51f2e8426 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Mon, 29 Apr 2024 05:34:11 -0600 Subject: [PATCH 100/250] feat: expose `PendingPayload` from `reth-basic-payload-builder` (#7946) --- crates/payload/basic/src/lib.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index c89e8b949..4ee55b388 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -581,13 +581,23 @@ where /// A future that resolves to the result of the block building job. #[derive(Debug)] -struct PendingPayload

{ +pub struct PendingPayload

{ /// The marker to cancel the job on drop _cancel: Cancelled, /// The channel to send the result to. payload: oneshot::Receiver, PayloadBuilderError>>, } +impl

PendingPayload

{ + /// Constructs a `PendingPayload` future. + pub fn new( + cancel: Cancelled, + payload: oneshot::Receiver, PayloadBuilderError>>, + ) -> Self { + Self { _cancel: cancel, payload } + } +} + impl

Future for PendingPayload

{ type Output = Result, PayloadBuilderError>; From 3fc5cf646112e8d31d456f926fb07023af5829dc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 14:09:07 +0200 Subject: [PATCH 101/250] feat: use ComponentsBuilder as associated type in Node trait (#7957) --- crates/e2e-test-utils/src/lib.rs | 21 ++++---- crates/node-ethereum/src/node.rs | 19 +++----- crates/node-ethereum/tests/it/builder.rs | 8 +++ crates/node/builder/src/builder/mod.rs | 62 +++--------------------- crates/node/builder/src/node.rs | 24 +++------ crates/optimism/node/src/node.rs | 9 ++-- examples/custom-engine-types/src/main.rs | 15 +++--- 7 files changed, 51 insertions(+), 107 deletions(-) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 8fdaa044b..aa7d46428 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -6,8 +6,7 @@ use reth::{ }; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_node_builder::{ - components::{Components, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - FullNodeTypesAdapter, NodeAdapter, + components::NodeComponentsBuilder, FullNodeTypesAdapter, Node, NodeAdapter, RethFullAdapter, }; use reth_primitives::ChainSpec; use reth_provider::providers::BlockchainProvider; @@ -45,10 +44,7 @@ pub async fn setup( is_dev: bool, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + reth_node_builder::Node>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: NetworkBuilder, TmpPool>, - N::PayloadBuilder: PayloadServiceBuilder, TmpPool>, + N: Default + Node>, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -103,11 +99,14 @@ where // Type aliases type TmpDB = Arc>; -type TmpPool = <>>::PoolBuilder as PoolBuilder< - TmpNodeAdapter, ->>::Pool; type TmpNodeAdapter = FullNodeTypesAdapter>; +type Adapter = NodeAdapter< + RethFullAdapter, + <>>::ComponentsBuilder as NodeComponentsBuilder< + RethFullAdapter, + >>::Components, +>; + /// Type alias for a 
type of NodeHelper -pub type NodeHelperType = - NodeTestContext, Components, TmpPool>>>; +pub type NodeHelperType = NodeTestContext>; diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index e7caa927a..5a1a03554 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -5,8 +5,8 @@ use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGenera use reth_network::NetworkHandle; use reth_node_builder::{ components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - node::{FullNodeTypes, Node, NodeTypes}, - BuilderContext, PayloadBuilderConfig, + node::{FullNodeTypes, NodeTypes}, + BuilderContext, Node, PayloadBuilderConfig, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::CanonStateSubscriptions; @@ -50,18 +50,11 @@ impl Node for EthereumNode where N: FullNodeTypes, { - type PoolBuilder = EthereumPoolBuilder; - type NetworkBuilder = EthereumNetworkBuilder; - type PayloadBuilder = EthereumPayloadBuilder; + type ComponentsBuilder = + ComponentsBuilder; - fn components( - self, - ) -> ComponentsBuilder { - ComponentsBuilder::default() - .node_types::() - .pool(EthereumPoolBuilder::default()) - .payload(EthereumPayloadBuilder::default()) - .network(EthereumNetworkBuilder::default()) + fn components_builder(self) -> Self::ComponentsBuilder { + Self::components() } } diff --git a/crates/node-ethereum/tests/it/builder.rs b/crates/node-ethereum/tests/it/builder.rs index 7cfc0d705..1f4579236 100644 --- a/crates/node-ethereum/tests/it/builder.rs +++ b/crates/node-ethereum/tests/it/builder.rs @@ -33,3 +33,11 @@ fn test_basic_setup() { }) .check_launch(); } + +#[test] +fn test_node_setup() { + let config = NodeConfig::test(); + let db = create_test_rw_db(); + let _builder = + NodeBuilder::new(config).with_database(db).node(EthereumNode::default()).check_launch(); +} diff --git a/crates/node/builder/src/builder/mod.rs 
b/crates/node/builder/src/builder/mod.rs index 9649360eb..6365fca4d 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -3,7 +3,7 @@ #![allow(clippy::type_complexity, missing_debug_implementations)] use crate::{ - components::{Components, ComponentsBuilder, NodeComponentsBuilder, PoolBuilder}, + components::NodeComponentsBuilder, node::FullNode, rpc::{RethRpcServerHandles, RpcContext}, DefaultNodeLauncher, Node, NodeHandle, @@ -204,28 +204,11 @@ where pub fn node( self, node: N, - ) -> NodeBuilderWithComponents< - RethFullAdapter, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - > + ) -> NodeBuilderWithComponents, N::ComponentsBuilder> where N: Node>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, { - self.with_types(node.clone()).with_components(node.components()) + self.with_types(node.clone()).with_components(node.components_builder()) } } @@ -271,33 +254,16 @@ where } /// Preconfigures the node with a specific node implementation. + /// + /// This is a convenience method that sets the node's types and components in one call. 
pub fn node( self, node: N, - ) -> WithLaunchContext< - NodeBuilderWithComponents< - RethFullAdapter, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - >, - > + ) -> WithLaunchContext, N::ComponentsBuilder>> where N: Node>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, { - self.with_types(node.clone()).with_components(node.components()) + self.with_types(node.clone()).with_components(node.components_builder()) } /// Launches a preconfigured [Node] @@ -312,24 +278,12 @@ where NodeHandle< NodeAdapter< RethFullAdapter, - Components< - RethFullAdapter, - >>::Pool, - >, + >>::Components, >, >, > where N: Node>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, { self.node(node).launch().await } diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 766bae14f..7831f29d0 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -1,7 +1,4 @@ -use crate::{ - components::ComponentsBuilder, - rpc::{RethRpcServerHandles, RpcRegistry}, -}; +use crate::rpc::{RethRpcServerHandles, RpcRegistry}; use reth_network::NetworkHandle; use reth_node_api::FullNodeComponents; use reth_node_core::{ @@ -19,23 +16,18 @@ use reth_tasks::TaskExecutor; use std::sync::Arc; // re-export the node api types +use crate::components::NodeComponentsBuilder; pub use reth_node_api::{FullNodeTypes, NodeTypes}; -/// A [Node] is a [NodeTypes] that comes with preconfigured components. +/// A [crate::Node] is a [NodeTypes] that comes with preconfigured components. /// /// This can be used to configure the builder with a preset of components. 
-pub trait Node: NodeTypes + Clone { - /// The type that builds the node's pool. - type PoolBuilder; - /// The type that builds the node's network. - type NetworkBuilder; - /// The type that builds the node's payload service. - type PayloadBuilder; +pub trait Node: NodeTypes + Clone { + /// The type that builds the node's components. + type ComponentsBuilder: NodeComponentsBuilder; - /// Returns the [ComponentsBuilder] for the node. - fn components( - self, - ) -> ComponentsBuilder; + /// Returns a [NodeComponentsBuilder] for the node. + fn components_builder(self) -> Self::ComponentsBuilder; } /// The launched node with all components including RPC handlers. diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 0d6e4996a..8f6a3c19b 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -55,13 +55,10 @@ impl Node for OptimismNode where N: FullNodeTypes, { - type PoolBuilder = OptimismPoolBuilder; - type NetworkBuilder = OptimismNetworkBuilder; - type PayloadBuilder = OptimismPayloadBuilder; + type ComponentsBuilder = + ComponentsBuilder; - fn components( - self, - ) -> ComponentsBuilder { + fn components_builder(self) -> Self::ComponentsBuilder { let Self { args } = self; Self::components(args) } diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index a2ade9cc1..79639e1ba 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -202,13 +202,14 @@ impl Node for MyCustomNode where N: FullNodeTypes, { - type PoolBuilder = EthereumPoolBuilder; - type NetworkBuilder = EthereumNetworkBuilder; - type PayloadBuilder = CustomPayloadServiceBuilder; - - fn components( - self, - ) -> ComponentsBuilder { + type ComponentsBuilder = ComponentsBuilder< + N, + EthereumPoolBuilder, + CustomPayloadServiceBuilder, + EthereumNetworkBuilder, + >; + + fn components_builder(self) -> Self::ComponentsBuilder { 
ComponentsBuilder::default() .node_types::() .pool(EthereumPoolBuilder::default()) From 76e3aa9fa8db0f0e0c65821db95a712e0b5f4229 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 29 Apr 2024 14:54:15 +0200 Subject: [PATCH 102/250] feat: enable optimism hardforks on genesis parse (#7935) --- crates/primitives/src/chain/spec.rs | 97 +++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index a1ae18ad0..4ce26f84b 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -996,6 +996,9 @@ impl ChainSpec { impl From for ChainSpec { fn from(genesis: Genesis) -> Self { + #[cfg(feature = "optimism")] + let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + // Block-based hardforks let hardfork_opts = [ (Hardfork::Homestead, genesis.config.homestead_block), @@ -1011,6 +1014,8 @@ impl From for ChainSpec { (Hardfork::London, genesis.config.london_block), (Hardfork::ArrowGlacier, genesis.config.arrow_glacier_block), (Hardfork::GrayGlacier, genesis.config.gray_glacier_block), + #[cfg(feature = "optimism")] + (Hardfork::Bedrock, optimism_genesis_info.bedrock_block), ]; let mut hardforks = hardfork_opts .iter() @@ -1037,6 +1042,12 @@ impl From for ChainSpec { let time_hardfork_opts = [ (Hardfork::Shanghai, genesis.config.shanghai_time), (Hardfork::Cancun, genesis.config.cancun_time), + #[cfg(feature = "optimism")] + (Hardfork::Regolith, optimism_genesis_info.regolith_time), + #[cfg(feature = "optimism")] + (Hardfork::Ecotone, optimism_genesis_info.ecotone_time), + #[cfg(feature = "optimism")] + (Hardfork::Canyon, optimism_genesis_info.canyon_time), ]; let time_hardforks = time_hardfork_opts @@ -1691,6 +1702,42 @@ impl DepositContract { } } +#[cfg(feature = "optimism")] +struct OptimismGenesisInfo { + bedrock_block: Option, + regolith_time: Option, + ecotone_time: Option, + canyon_time: Option, +} + +#[cfg(feature = 
"optimism")] +impl OptimismGenesisInfo { + fn extract_from(genesis: &Genesis) -> Self { + Self { + bedrock_block: genesis + .config + .extra_fields + .get("bedrockBlock") + .and_then(|value| value.as_u64()), + regolith_time: genesis + .config + .extra_fields + .get("regolithTime") + .and_then(|value| value.as_u64()), + ecotone_time: genesis + .config + .extra_fields + .get("ecotoneTime") + .and_then(|value| value.as_u64()), + canyon_time: genesis + .config + .extra_fields + .get("canyonTime") + .and_then(|value| value.as_u64()), + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -3272,4 +3319,54 @@ Post-merge hard forks (timestamp based): fn is_bedrock_active() { assert!(!OP_MAINNET.is_bedrock_active_at_block(1)) } + + #[cfg(feature = "optimism")] + #[test] + fn parse_optimism_hardforks() { + let geth_genesis = r#" + { + "config": { + "bedrockBlock": 10, + "regolithTime": 20, + "ecotoneTime": 30, + "canyonTime": 40, + "optimism": { + "eip1559Elasticity": 50, + "eip1559Denominator": 60, + "eip1559DenominatorCanyon": 70 + } + } + } + "#; + let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap(); + + let actual_bedrock_block = genesis.config.extra_fields.get("bedrockBlock"); + assert_eq!(actual_bedrock_block, Some(serde_json::Value::from(10)).as_ref()); + let actual_regolith_timestamp = genesis.config.extra_fields.get("regolithTime"); + assert_eq!(actual_regolith_timestamp, Some(serde_json::Value::from(20)).as_ref()); + let actual_ecotone_timestamp = genesis.config.extra_fields.get("ecotoneTime"); + assert_eq!(actual_ecotone_timestamp, Some(serde_json::Value::from(30)).as_ref()); + let actual_canyon_timestamp = genesis.config.extra_fields.get("canyonTime"); + assert_eq!(actual_canyon_timestamp, Some(serde_json::Value::from(40)).as_ref()); + + let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); + assert_eq!( + optimism_object, + &serde_json::json!({ + "eip1559Elasticity": 50, + "eip1559Denominator": 60, + 
"eip1559DenominatorCanyon": 70 + }) + ); + let chain_spec: ChainSpec = genesis.into(); + assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0)); + + assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 40)); + } } From 14ed7196c258545992efa478739a361f2fde7f1e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 29 Apr 2024 13:56:22 +0100 Subject: [PATCH 103/250] chore(storage): use `TAKE` const instead of `UNWIND` (#7959) --- .../provider/src/providers/database/provider.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ba85a4a40..c96a05937 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -387,7 +387,7 @@ impl DatabaseProvider { /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset - pub fn unwind_or_peek_state( + pub fn unwind_or_peek_state( &self, range: RangeInclusive, ) -> ProviderResult { @@ -408,8 +408,8 @@ impl DatabaseProvider { let storage_range = BlockNumberAddress::range(range.clone()); let storage_changeset = - self.get_or_take::(storage_range)?; - let account_changeset = self.get_or_take::(range)?; + self.get_or_take::(storage_range)?; + let account_changeset = self.get_or_take::(range)?; // iterate previous value and get plain state value to create changeset // Double option around Account represent if Account state is know (first option) and @@ -478,7 +478,7 @@ impl DatabaseProvider { .push(old_storage); } - if UNWIND { + if TAKE { // iterate over local plain state remove all account and all storages. for (address, (old_account, new_account, storage)) in state.iter() { // revert account if needed. @@ -515,7 +515,7 @@ impl DatabaseProvider { // iterate over block body and create ExecutionResult let mut receipt_iter = self - .get_or_take::(from_transaction_num..=to_transaction_num)? + .get_or_take::(from_transaction_num..=to_transaction_num)? .into_iter(); let mut receipts = Vec::new(); From af2da06a0dbc64ae9581ac3f1061eb6574c0158a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 15:18:23 +0200 Subject: [PATCH 104/250] chore: add required trait bounds to DB type (#7960) --- crates/node/api/src/node.rs | 13 ++++++++----- crates/node/builder/src/builder/mod.rs | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 3fd158b6a..db4bdd961 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,7 +1,10 @@ //! 
Traits for configuring a node use crate::{primitives::NodePrimitives, ConfigureEvm, EngineTypes}; -use reth_db::database::Database; +use reth_db::{ + database::Database, + database_metrics::{DatabaseMetadata, DatabaseMetrics}, +}; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::FullProvider; @@ -25,11 +28,11 @@ pub trait NodeTypes: Send + Sync + 'static { fn evm_config(&self) -> Self::Evm; } -/// A helper type that is downstream of the [NodeTypes] trait and adds stateful components to the +/// A helper trait that is downstream of the [NodeTypes] trait and adds stateful components to the /// node. pub trait FullNodeTypes: NodeTypes + 'static { - /// Underlying database type. - type DB: Database + Clone + 'static; + /// Underlying database type used by the node to store and retrieve data. + type DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static; /// The provider type used to interact with the node. type Provider: FullProvider; } @@ -71,7 +74,7 @@ impl FullNodeTypes for FullNodeTypesAdapter, - DB: Database + Clone + 'static, + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { type DB = DB; type Provider = Provider; diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 6365fca4d..1da23fc04 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -187,7 +187,7 @@ impl NodeBuilder { impl NodeBuilder where - DB: Database + Unpin + Clone + 'static, + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { /// Configures the types of the node. 
pub fn with_types(self, types: T) -> NodeBuilderWithTypes> From b2c3d0c0b341c8e5e2762ea5f6a1ae018a1c521c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 15:21:07 +0200 Subject: [PATCH 105/250] chore: update codeowners (#7961) --- CODEOWNERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index be8243ea2..bd86e2e58 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -7,7 +7,7 @@ crates/exex @onbjerg @shekhirin crates/metrics @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk -crates/node-builder/ @mattsse @Rjected @onbjerg +crates/node/ @mattsse @Rjected @onbjerg crates/node-core/ @mattsse @Rjected @onbjerg crates/node-ethereum/ @mattsse @Rjected crates/payload/ @mattsse @Rjected @@ -23,4 +23,5 @@ crates/tracing @onbjerg crates/transaction-pool/ @mattsse crates/trie @rkrasiuk crates/trie-parallel @rkrasiuk +crates/optimism @mattsse .github/ @onbjerg @gakonst @DaniPopes From ee70351751abe61da3bc9f3b52f2e47c839c2328 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 15:25:53 +0200 Subject: [PATCH 106/250] test: rm redundant helper trait (#7962) --- .../testing-utils/src/genesis_allocator.rs | 20 ++++++++++--------- testing/testing-utils/src/lib.rs | 1 + 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/testing/testing-utils/src/genesis_allocator.rs b/testing/testing-utils/src/genesis_allocator.rs index 33b7188d1..067f68343 100644 --- a/testing/testing-utils/src/genesis_allocator.rs +++ b/testing/testing-utils/src/genesis_allocator.rs @@ -7,7 +7,10 @@ use secp256k1::{ rand::{thread_rng, RngCore}, Keypair, Secp256k1, }; -use std::collections::{hash_map::Entry, BTreeMap, HashMap}; +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap}, + fmt, +}; /// This helps create a custom genesis alloc by making it easy to add funded accounts with known /// signers to the genesis block. 
@@ -37,19 +40,18 @@ use std::collections::{hash_map::Entry, BTreeMap, HashMap}; /// // Once you're done adding accounts, you can build the alloc. /// let alloc = allocator.build(); /// ``` -#[derive(Debug)] pub struct GenesisAllocator<'a> { /// The genesis alloc to be built. alloc: HashMap, /// The rng to use for generating key pairs. - rng: Box, + rng: Box, } impl<'a> GenesisAllocator<'a> { /// Initialize a new alloc builder with the provided rng. pub fn new_with_rng(rng: &'a mut R) -> Self where - R: RngCore + std::fmt::Debug, + R: RngCore, { Self { alloc: HashMap::default(), rng: Box::new(rng) } } @@ -197,8 +199,8 @@ impl Default for GenesisAllocator<'_> { } } -/// Helper trait that encapsulates [RngCore], and [Debug](std::fmt::Debug) to get around rules -/// for auto traits (Opt-in built-in traits). -trait RngDebug: RngCore + std::fmt::Debug {} - -impl RngDebug for T where T: RngCore + std::fmt::Debug {} +impl fmt::Debug for GenesisAllocator<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GenesisAllocator").field("alloc", &self.alloc).finish_non_exhaustive() + } +} diff --git a/testing/testing-utils/src/lib.rs b/testing/testing-utils/src/lib.rs index 0cf98c6ff..27b54b19e 100644 --- a/testing/testing-utils/src/lib.rs +++ b/testing/testing-utils/src/lib.rs @@ -6,6 +6,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] pub mod genesis_allocator; From a8cd1f71a03c773c24659fc28bfed2ba5f2bd97b Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:32:42 +0100 Subject: [PATCH 107/250] chore: add test to `Compact` derived types to avoid backwards incompatibilities (#7822) Co-authored-by: Oliver Nordbjerg Co-authored-by: Oliver Nordbjerg --- .../codecs/derive/src/compact/flags.rs | 25 ++++- .../storage/codecs/derive/src/compact/mod.rs | 32 
++++--- crates/storage/codecs/src/alloy/withdrawal.rs | 18 ++++ crates/storage/db/Cargo.toml | 12 ++- .../storage/db/src/tables/codecs/compact.rs | 92 +++++++++++++++++++ 5 files changed, 163 insertions(+), 16 deletions(-) diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index 650d97ea8..24757d8e6 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -36,7 +36,7 @@ pub(crate) fn generate_flag_struct( }; if total_bits == 0 { - return placeholder_flag_struct(&flags_ident) + return placeholder_flag_struct(ident, &flags_ident) } let (total_bytes, unused_bits) = pad_flag_struct(total_bits, &mut field_flags); @@ -51,9 +51,16 @@ pub(crate) fn generate_flag_struct( let docs = format!("Fieldset that facilitates compacting the parent type. Used bytes: {total_bytes} | Unused bits: {unused_bits}"); + let bitflag_encoded_bytes = format!("Used bytes by [`{flags_ident}`]"); // Generate the flag struct. quote! { + impl #ident { + #[doc = #bitflag_encoded_bytes] + pub const fn bitflag_encoded_bytes() -> usize { + #total_bytes as usize + } + } pub use #mod_flags_ident::#flags_ident; #[allow(non_snake_case)] mod #mod_flags_ident { @@ -146,8 +153,22 @@ fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec) -> (u8, } /// Placeholder struct for when there are no bitfields to be added. -fn placeholder_flag_struct(flags: &Ident) -> TokenStream2 { +fn placeholder_flag_struct(ident: &Ident, flags: &Ident) -> TokenStream2 { + let bitflag_encoded_bytes = format!("Used bytes by [`{flags}`]"); + let bitflag_unused_bits = format!("Unused bits for new fields by [`{flags}`]"); quote! { + impl #ident { + #[doc = #bitflag_encoded_bytes] + pub const fn bitflag_encoded_bytes() -> usize { + 0 + } + + #[doc = #bitflag_unused_bits] + pub const fn bitflag_unused_bits() -> usize { + 0 + } + } + /// Placeholder struct for when there is no need for a fieldset. 
Doesn't actually write or read any data. #[derive(Debug, Default)] pub struct #flags { diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index 7614fa832..e67adb6fd 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -185,18 +185,18 @@ mod tests { #[test] fn gen() { let f_struct = quote! { - #[derive(Debug, PartialEq, Clone)] - pub struct TestStruct { - f_u64: u64, - f_u256: U256, - f_bool_t: bool, - f_bool_f: bool, - f_option_none: Option, - f_option_some: Option, - f_option_some_u64: Option, - f_vec_empty: Vec, - f_vec_some: Vec

, - } + #[derive(Debug, PartialEq, Clone)] + pub struct TestStruct { + f_u64: u64, + f_u256: U256, + f_bool_t: bool, + f_bool_f: bool, + f_option_none: Option, + f_option_some: Option, + f_option_some_u64: Option, + f_vec_empty: Vec, + f_vec_some: Vec
, + } }; // Generate code that will impl the `Compact` trait. @@ -208,7 +208,15 @@ mod tests { // Expected output in a TokenStream format. Commas matter! let should_output = quote! { + impl TestStruct { + #[doc = "Used bytes by [`TestStructFlags`]"] + pub const fn bitflag_encoded_bytes() -> usize { + 2u8 as usize + } + } + pub use TestStruct_flags::TestStructFlags; + #[allow(non_snake_case)] mod TestStruct_flags { use bytes::Buf; diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 0849b7e4a..5cdc1a667 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -59,4 +59,22 @@ mod tests { assert_eq!(withdrawal, decoded) } } + + // each value in the database has an extra field named flags that encodes metadata about other + // fields in the value, e.g. offset and length. + // + // this check is to ensure we do not inadvertently add too many fields to a struct which would + // expand the flags field and break backwards compatibility + #[test] + fn test_ensure_backwards_compatibility() { + #[cfg(not(feature = "optimism"))] + { + assert_eq!(Withdrawal::bitflag_encoded_bytes(), 2); + } + + #[cfg(feature = "optimism")] + { + assert_eq!(Withdrawal::bitflag_encoded_bytes(), 2); + } + } } diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index f816cc2c0..97b556346 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -16,7 +16,10 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-codecs.workspace = true -reth-libmdbx = { workspace = true, optional = true, features = ["return-borrowed", "read-tx-timeouts"] } +reth-libmdbx = { workspace = true, optional = true, features = [ + "return-borrowed", + "read-tx-timeouts", +] } reth-nippy-jar.workspace = true reth-tracing.workspace = true @@ -58,7 +61,11 @@ serde_json.workspace = true tempfile.workspace = true test-fuzz.workspace = 
true -pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } +pprof = { workspace = true, features = [ + "flamegraph", + "frame-pointer", + "criterion", +] } criterion.workspace = true iai-callgrind = "0.10.2" @@ -81,6 +88,7 @@ arbitrary = [ "dep:proptest", "dep:proptest-derive", ] +optimism = [] [[bench]] name = "hash_keys" diff --git a/crates/storage/db/src/tables/codecs/compact.rs b/crates/storage/db/src/tables/codecs/compact.rs index c302c6a48..452f5c632 100644 --- a/crates/storage/db/src/tables/codecs/compact.rs +++ b/crates/storage/db/src/tables/codecs/compact.rs @@ -121,3 +121,95 @@ macro_rules! add_wrapper_struct { add_wrapper_struct!((U256, CompactU256)); add_wrapper_struct!((u64, CompactU64)); add_wrapper_struct!((ClientVersion, CompactClientVersion)); + +#[cfg(test)] +mod tests { + use crate::{ + codecs::{ + compact::{CompactClientVersion, CompactU64}, + CompactU256, + }, + models::{StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, + }; + use reth_primitives::{ + stage::{ + AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, + ExecutionCheckpoint, HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, + StageUnitCheckpoint, StorageHashingCheckpoint, + }, + Account, Header, PruneCheckpoint, PruneMode, PruneSegment, Receipt, ReceiptWithBloom, + SealedHeader, TxEip1559, TxEip2930, TxEip4844, TxLegacy, Withdrawals, + }; + + // each value in the database has an extra field named flags that encodes metadata about other + // fields in the value, e.g. offset and length. 
+ // + // this check is to ensure we do not inadvertently add too many fields to a struct which would + // expand the flags field and break backwards compatibility + #[test] + fn test_ensure_backwards_compatibility() { + #[cfg(not(feature = "optimism"))] + { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 1); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + } + + #[cfg(feature = "optimism")] + { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + 
assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 2); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + } + } +} From fd8fdcfd4ba2d830f2795c7c17f7f5bfa49ba388 Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Mon, 29 Apr 2024 23:30:42 +0800 Subject: [PATCH 108/250] refactor: remove futureUnordered in ipc (#7920) --- crates/rpc/ipc/src/server/connection.rs | 13 +- crates/rpc/ipc/src/server/future.rs | 27 +-- crates/rpc/ipc/src/server/ipc.rs | 3 +- crates/rpc/ipc/src/server/mod.rs | 237 
++++++++++++++++-------- 4 files changed, 162 insertions(+), 118 deletions(-) diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index 05f7a53a9..2aadc6e2b 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -9,7 +9,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; -use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::codec::Framed; use tower::Service; @@ -18,17 +18,6 @@ pub(crate) type JsonRpcStream = Framed; #[pin_project::pin_project] pub(crate) struct IpcConn(#[pin] pub(crate) T); -impl IpcConn> -where - T: AsyncRead + AsyncWrite + Unpin, -{ - /// Create a response for when the server is busy and can't accept more requests. - pub(crate) async fn reject_connection(self) { - let mut parts = self.0.into_parts(); - let _ = parts.io.write_all(b"Too many connections. Please try again later.").await; - } -} - impl Stream for IpcConn> where T: AsyncRead + AsyncWrite, diff --git a/crates/rpc/ipc/src/server/future.rs b/crates/rpc/ipc/src/server/future.rs index f807af449..85c69c2a6 100644 --- a/crates/rpc/ipc/src/server/future.rs +++ b/crates/rpc/ipc/src/server/future.rs @@ -27,8 +27,7 @@ //! Utilities for handling async code. use std::sync::Arc; - -use tokio::sync::{watch, OwnedSemaphorePermit, Semaphore, TryAcquireError}; +use tokio::sync::watch; #[derive(Debug, Clone)] pub(crate) struct StopHandle(watch::Receiver<()>); @@ -59,27 +58,3 @@ impl ServerHandle { self.0.closed().await } } - -/// Limits the number of connections. 
-pub(crate) struct ConnectionGuard(Arc); - -impl ConnectionGuard { - pub(crate) fn new(limit: usize) -> Self { - Self(Arc::new(Semaphore::new(limit))) - } - - pub(crate) fn try_acquire(&self) -> Option { - match self.0.clone().try_acquire_owned() { - Ok(guard) => Some(guard), - Err(TryAcquireError::Closed) => { - unreachable!("Semaphore::Close is never called and can't be closed") - } - Err(TryAcquireError::NoPermits) => None, - } - } - - #[allow(dead_code)] - pub(crate) fn available_connections(&self) -> usize { - self.0.available_permits() - } -} diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index daf7d1dc0..c73d9bb93 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -1,7 +1,5 @@ //! IPC request handling adapted from [`jsonrpsee`] http request handling -use std::sync::Arc; - use futures::{stream::FuturesOrdered, StreamExt}; use jsonrpsee::{ batch_response_error, @@ -17,6 +15,7 @@ use jsonrpsee::{ }, BatchResponseBuilder, MethodResponse, ResponsePayload, }; +use std::sync::Arc; use tokio::sync::OwnedSemaphorePermit; use tokio_util::either::Either; use tracing::instrument; diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 7239249e1..ed0eadb4a 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -2,16 +2,17 @@ use crate::server::{ connection::{IpcConn, JsonRpcStream}, - future::{ConnectionGuard, StopHandle}, + future::StopHandle, }; use futures::StreamExt; -use futures_util::{future::Either, stream::FuturesUnordered}; +use futures_util::{future::Either, AsyncWriteExt}; use interprocess::local_socket::tokio::{LocalSocketListener, LocalSocketStream}; use jsonrpsee::{ core::TEN_MB_SIZE_BYTES, server::{ middleware::rpc::{RpcLoggerLayer, RpcServiceT}, - AlreadyStoppedError, IdProvider, RandomIntegerIdProvider, + AlreadyStoppedError, ConnectionGuard, ConnectionPermit, IdProvider, + RandomIntegerIdProvider, }, 
BoundedSubscriptions, MethodSink, Methods, }; @@ -24,10 +25,10 @@ use std::{ }; use tokio::{ io::{AsyncRead, AsyncWrite}, - sync::{oneshot, watch, OwnedSemaphorePermit}, + sync::{oneshot, watch}, }; use tower::{layer::util::Identity, Layer, Service}; -use tracing::{debug, trace, warn, Instrument}; +use tracing::{debug, instrument, trace, warn, Instrument}; // re-export so can be used during builder setup use crate::{ server::{ @@ -150,68 +151,44 @@ where // signal that we're ready to accept connections on_ready.send(Ok(())).ok(); - let message_buffer_capacity = self.cfg.message_buffer_capacity; - let max_request_body_size = self.cfg.max_request_body_size; - let max_response_body_size = self.cfg.max_response_body_size; - let max_log_length = self.cfg.max_log_length; - let id_provider = self.id_provider; - let max_subscriptions_per_connection = self.cfg.max_subscriptions_per_connection; - let mut id: u32 = 0; let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize); - let mut connections = FuturesUnordered::new(); let stopped = stop_handle.clone().shutdown(); tokio::pin!(stopped); + let (drop_on_completion, mut process_connection_awaiter) = mpsc::channel::<()>(1); + trace!("accepting ipc connections"); loop { match try_accept_conn(&listener, stopped).await { AcceptConnection::Established { local_socket_stream, stop } => { - trace!("established new connection"); - let ipc = IpcConn(tokio_util::codec::Decoder::framed( - StreamCodec::stream_incoming(), - local_socket_stream.compat(), - )); - - let conn = match connection_guard.try_acquire() { - Some(conn) => conn, - None => { - warn!("Too many IPC connections. Please try again later."); - connections.push(tokio::spawn(ipc.reject_connection().in_current_span())); - stopped = stop; - continue; - } + let Some(conn_permit) = connection_guard.try_acquire() else { + let (mut _reader, mut writer) = local_socket_stream.into_split(); + let _ = writer.write_all(b"Too many connections. 
Please try again later.").await; + drop((_reader, writer)); + stopped = stop; + continue; }; - let (tx, rx) = mpsc::channel::(message_buffer_capacity as usize); - let method_sink = MethodSink::new_with_limit(tx, max_response_body_size); - let tower_service = TowerServiceNoHttp { - inner: ServiceData { - methods: methods.clone(), - max_request_body_size, - max_response_body_size, - max_log_length, - id_provider: id_provider.clone(), - stop_handle: stop_handle.clone(), - max_subscriptions_per_connection, - conn_id: id, - conn: Arc::new(conn), - bounded_subscriptions: BoundedSubscriptions::new( - max_subscriptions_per_connection, - ), - method_sink, - }, - rpc_middleware: self.rpc_middleware.clone(), - }; + let max_conns = connection_guard.max_connections(); + let curr_conns = max_conns - connection_guard.available_connections(); + trace!("Accepting new connection {}/{}", curr_conns, max_conns); + + let conn_permit = Arc::new(conn_permit); - let service = self.http_middleware.service(tower_service); - connections.push(tokio::spawn(process_connection( - ipc, - service, - stop_handle.clone(), - rx, - ).in_current_span())); + process_connection(ProcessConnection{ + http_middleware: &self.http_middleware, + rpc_middleware: self.rpc_middleware.clone(), + conn_permit, + conn_id: id, + server_cfg: self.cfg.clone(), + stop_handle: stop_handle.clone(), + drop_on_completion: drop_on_completion.clone(), + methods: methods.clone(), + id_provider: self.id_provider.clone(), + local_socket_stream, + }); id = id.wrapping_add(1); stopped = stop; @@ -224,11 +201,14 @@ where } } - // FuturesUnordered won't poll anything until this line but because the - // tasks are spawned (so that they can progress independently) - // then this just makes sure that all tasks are completed before - // returning from this function. 
- while connections.next().await.is_some() {} + // Drop the last Sender + drop(drop_on_completion); + + // Once this channel is closed it is safe to assume that all connections have been gracefully shutdown + while process_connection_awaiter.recv().await.is_some() { + // Generally, messages should not be sent across this channel, + // but we'll loop here to wait for `None` just to be on the safe side + } } } @@ -279,30 +259,22 @@ pub struct IpcServerStartError { pub(crate) struct ServiceData { /// Registered server methods. pub(crate) methods: Methods, - /// Max request body size. - pub(crate) max_request_body_size: u32, - /// Max request body size. - pub(crate) max_response_body_size: u32, - /// Max length for logging for request and response - /// - /// Logs bigger than this limit will be truncated. - pub(crate) max_log_length: u32, /// Subscription ID provider. pub(crate) id_provider: Arc, /// Stop handle. pub(crate) stop_handle: StopHandle, - /// Max subscriptions per connection. - pub(crate) max_subscriptions_per_connection: u32, /// Connection ID pub(crate) conn_id: u32, - /// Handle to hold a `connection permit`. - pub(crate) conn: Arc, + /// Connection Permit. + pub(crate) conn_permit: Arc, /// Limits the number of subscriptions for this connection pub(crate) bounded_subscriptions: BoundedSubscriptions, /// Sink that is used to send back responses to the connection. /// /// This is used for subscriptions. 
pub(crate) method_sink: MethodSink, + /// ServerConfig + pub(crate) server_cfg: Settings, } /// Similar to [`tower::ServiceBuilder`] but doesn't @@ -407,21 +379,21 @@ where let cfg = RpcServiceCfg::CallsAndSubscriptions { bounded_subscriptions: BoundedSubscriptions::new( - self.inner.max_subscriptions_per_connection, + self.inner.server_cfg.max_subscriptions_per_connection, ), id_provider: self.inner.id_provider.clone(), sink: self.inner.method_sink.clone(), }; - let max_response_body_size = self.inner.max_response_body_size as usize; - let max_request_body_size = self.inner.max_request_body_size as usize; + let max_response_body_size = self.inner.server_cfg.max_response_body_size as usize; + let max_request_body_size = self.inner.server_cfg.max_request_body_size as usize; + let conn = self.inner.conn_permit.clone(); let rpc_service = self.rpc_middleware.service(RpcService::new( self.inner.methods.clone(), max_response_body_size, self.inner.conn_id as usize, cfg, )); - let conn = self.inner.conn.clone(); // an ipc connection needs to handle read+write concurrently // even if the underlying rpc handler spawns the actual work or is does a lot of async any // additional overhead performed by `handle_request` can result in I/O latencies, for @@ -443,9 +415,81 @@ where } } +struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> { + http_middleware: &'a tower::ServiceBuilder, + rpc_middleware: RpcServiceBuilder, + conn_permit: Arc, + conn_id: u32, + server_cfg: Settings, + stop_handle: StopHandle, + drop_on_completion: mpsc::Sender<()>, + methods: Methods, + id_provider: Arc, + local_socket_stream: LocalSocketStream, +} + /// Spawns the IPC connection onto a new task -async fn process_connection( - conn: IpcConn>, +#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")] +fn process_connection<'b, RpcMiddleware, HttpMiddleware>( + params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>, +) where + RpcMiddleware: Layer + 
Clone + Send + 'static, + for<'a> >::Service: RpcServiceT<'a>, + HttpMiddleware: Layer> + Send + 'static, + >>::Service: Send + + Service< + String, + Response = Option, + Error = Box, + >, + <>>::Service as Service>::Future: + Send + Unpin, + { + let ProcessConnection { + http_middleware, + rpc_middleware, + conn_permit, + conn_id, + server_cfg, + stop_handle, + drop_on_completion, + id_provider, + methods, + local_socket_stream, + } = params; + + let ipc = IpcConn(tokio_util::codec::Decoder::framed( + StreamCodec::stream_incoming(), + local_socket_stream.compat(), + )); + + let (tx, rx) = mpsc::channel::(server_cfg.message_buffer_capacity as usize); + let method_sink = MethodSink::new_with_limit(tx, server_cfg.max_response_body_size); + let tower_service = TowerServiceNoHttp { + inner: ServiceData { + methods, + id_provider, + stop_handle: stop_handle.clone(), + server_cfg: server_cfg.clone(), + conn_id, + conn_permit, + bounded_subscriptions: BoundedSubscriptions::new( + server_cfg.max_subscriptions_per_connection, + ), + method_sink, + }, + rpc_middleware, + }; + + let service = http_middleware.service(tower_service); + tokio::spawn(async { + to_ipc_service(ipc, service, stop_handle, rx).in_current_span().await; + drop(drop_on_completion) + }); +} + +async fn to_ipc_service( + ipc: IpcConn>, service: S, stop_handle: StopHandle, rx: mpsc::Receiver, @@ -457,7 +501,7 @@ async fn process_connection( { let rx_item = ReceiverStream::new(rx); let conn = IpcConnDriver { - conn, + conn: ipc, service, pending_calls: Default::default(), items: Default::default(), @@ -799,6 +843,7 @@ mod tests { types::Request, PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; + use reth_tracing::init_test_tracing; use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; @@ -864,6 +909,7 @@ mod tests { #[tokio::test] async fn can_set_the_max_request_body_size() { + init_test_tracing(); let endpoint = dummy_endpoint(); let server = 
Builder::default().max_request_body_size(100).build(&endpoint); let mut module = RpcModule::new(()); @@ -888,8 +934,43 @@ mod tests { assert!(response.is_err()); } + #[tokio::test] + async fn can_set_max_connections() { + init_test_tracing(); + + let endpoint = dummy_endpoint(); + let server = Builder::default().max_connections(2).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "succeed").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client1 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let client2 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let client3 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + + let response1: Result = client1.request("anything", rpc_params![]).await; + let response2: Result = client2.request("anything", rpc_params![]).await; + let response3: Result = client3.request("anything", rpc_params![]).await; + + assert!(response1.is_ok()); + assert!(response2.is_ok()); + // Third connection is rejected + assert!(response3.is_err()); + + // Decrement connection count + drop(client2); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // Can connect again + let client4 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let response4: Result = client4.request("anything", rpc_params![]).await; + assert!(response4.is_ok()); + } + #[tokio::test] async fn test_rpc_request() { + init_test_tracing(); let endpoint = dummy_endpoint(); let server = Builder::default().build(&endpoint); let mut module = RpcModule::new(()); From 12873d515a9cea30d553fe938dc42a12c072562b Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Mon, 29 Apr 2024 10:09:16 -0600 Subject: [PATCH 109/250] feat: expose fields of `ResolveBestPayload` for remote construction (#7947) Co-authored-by: Matthias Seitz --- crates/payload/basic/src/lib.rs | 6 +++--- 1 file 
changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 4ee55b388..6aa3ccbc1 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -528,11 +528,11 @@ where #[derive(Debug)] pub struct ResolveBestPayload { /// Best payload so far. - best_payload: Option, + pub best_payload: Option, /// Regular payload job that's currently running that might produce a better payload. - maybe_better: Option>, + pub maybe_better: Option>, /// The empty payload building job in progress. - empty_payload: Option>>, + pub empty_payload: Option>>, } impl Future for ResolveBestPayload From 33f4c3fa91a423d33dbe9ec8f7adec5ac9364aa9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 29 Apr 2024 20:49:41 +0200 Subject: [PATCH 110/250] fix(op): genesis (#7969) --- crates/primitives/res/genesis/optimism.json | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/primitives/res/genesis/optimism.json b/crates/primitives/res/genesis/optimism.json index 2fb05781e..50c45b68e 100644 --- a/crates/primitives/res/genesis/optimism.json +++ b/crates/primitives/res/genesis/optimism.json @@ -12,10 +12,10 @@ "istanbulBlock": 0, "muirGlacierBlock": 0, "berlinBlock": 3950000, - "londonBlock": 3950000, - "arrowGlacierBlock": 3950000, - "grayGlacierBlock": 3950000, - "mergeNetsplitBlock": 3950000, + "londonBlock": 105235063, + "arrowGlacierBlock": 105235063, + "grayGlacierBlock": 105235063, + "mergeNetsplitBlock": 105235063, "bedrockBlock": 105235063, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true, @@ -28,5 +28,6 @@ "difficulty": "1", "gasLimit": "15000000", "extradata": "0x000000000000000000000000000000000000000000000000000000000000000000000398232e2064f896018496b4b44b3d62751f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stateRoot": 
"0xeddb4c1786789419153a27c4c80ff44a2226b6eda04f7e22ce5bae892ea568eb", "alloc": {} } \ No newline at end of file From 3754b1e1831b620a5eb35d396b8a2f4384a4c74a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 29 Apr 2024 21:22:01 +0200 Subject: [PATCH 111/250] feat(ci): add cfg check (#7965) Co-authored-by: Oliver Nordbjerg --- .github/workflows/lint.yml | 14 +++++++++++++- Makefile | 5 ++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ff3dad495..b939e159d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -126,11 +126,23 @@ jobs: with: cmd: jq empty etc/grafana/dashboards/overview.json + check-cfg: + name: check-cfg + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - run: cargo +nightly -Zcheck-cfg c + lint-success: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana, check-cfg] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/Makefile b/Makefile index 82994b3c2..fd5a252f1 100644 --- a/Makefile +++ b/Makefile @@ -414,8 +414,11 @@ test: make test-doc && \ make test-other-targets +cfg-check: + cargo +nightly -Zcheck-cfg c + pr: - make fmt && \ + make cfg-check && \ make lint && \ make docs && \ make test From 55017ef028adc298eff1dda9b9136dfed73d1f59 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 29 Apr 2024 21:45:05 +0200 Subject: [PATCH 112/250] chore: less restrictive bounds (#7970) --- crates/node/builder/src/builder/mod.rs | 7 ++----- crates/node/builder/src/launch/mod.rs | 20 +++++++------------- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git 
a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 1da23fc04..10977360a 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -34,12 +34,9 @@ use std::{str::FromStr, sync::Arc}; mod states; -/// The builtin provider type of the reth node. +/// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. -pub type RethFullProviderType = BlockchainProvider; - -/// The adapter type for a reth node with the given types -pub type RethFullAdapter = FullNodeTypesAdapter>; +pub type RethFullAdapter = FullNodeTypesAdapter>; #[cfg_attr(doc, aquamarine::aquamarine)] /// Declaratively construct a node. diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 408e47cd7..39c549e06 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -5,7 +5,7 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, node::FullNode, - BuilderContext, NodeBuilderWithComponents, NodeHandle, RethFullAdapter, + BuilderContext, NodeBuilderWithComponents, NodeHandle, }; use futures::{future, future::Either, stream, stream_select, StreamExt}; use reth_auto_seal_consensus::AutoSealConsensus; @@ -17,14 +17,10 @@ use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_consensus::Consensus; -use reth_db::{ - database::Database, - database_metrics::{DatabaseMetadata, DatabaseMetrics}, -}; use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; use reth_interfaces::p2p::either::EitherDownloader; use reth_network::NetworkEvents; -use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_api::{FullNodeComponents, FullNodeTypes}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, engine_api_store::EngineApiStore, @@ -74,18 
+70,16 @@ impl DefaultNodeLauncher { } } -impl LaunchNode, CB>> - for DefaultNodeLauncher +impl LaunchNode> for DefaultNodeLauncher where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypes, - CB: NodeComponentsBuilder>, + T: FullNodeTypes::DB>>, + CB: NodeComponentsBuilder, { - type Node = NodeHandle, CB::Components>>; + type Node = NodeHandle>; async fn launch_node( self, - target: NodeBuilderWithComponents, CB>, + target: NodeBuilderWithComponents, ) -> eyre::Result { let Self { ctx } = self; let NodeBuilderWithComponents { From 081978002738d17f08008357602dcabfb2cd67ca Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Mon, 29 Apr 2024 15:48:05 -0400 Subject: [PATCH 113/250] feat: add `extract_chain_id` method (#7921) --- crates/primitives/src/transaction/mod.rs | 17 +++-------- .../primitives/src/transaction/signature.rs | 29 ++++++++++++++----- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 76d9b0197..f401b0ef6 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -32,7 +32,7 @@ pub use sidecar::generate_blob_sidecar; #[cfg(feature = "c-kzg")] pub use sidecar::{BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError}; -pub use signature::Signature; +pub use signature::{extract_chain_id, Signature}; pub use tx_type::{ TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; @@ -1740,18 +1740,9 @@ impl TryFrom for TransactionSignedEcRecovered { // If the transaction type is Legacy, adjust the v component of the // signature according to the Ethereum specification TxType::Legacy => { - // Calculate the new v value based on the EIP-155 formula: - // v = {0,1} + CHAIN_ID * 2 + 35 - !(signature.v - - U256::from(if let Some(chain_id) = transaction.chain_id() { - // If 
CHAIN_ID is available, calculate the new v value - // accordingly - chain_id.saturating_mul(2).saturating_add(35) - } else { - // If CHAIN_ID is not available, set v = {0,1} + 27 - 27 - })) - .is_zero() + extract_chain_id(signature.v.to()) + .map_err(|_| ConversionError::InvalidSignature)? + .0 } _ => !signature.v.is_zero(), } diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 8cd57dc7f..29db729e9 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -114,16 +114,11 @@ impl Signature { if v == 0 && r.is_zero() && s.is_zero() { return Ok((Self { r, s, odd_y_parity: false }, None)) } - return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) } - let odd_y_parity = v == 28; - Ok((Self { r, s, odd_y_parity }, None)) - } else { - // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 - let odd_y_parity = ((v - 35) % 2) != 0; - let chain_id = (v - 35) >> 1; - Ok((Self { r, s, odd_y_parity }, Some(chain_id))) } + + let (odd_y_parity, chain_id) = extract_chain_id(v)?; + Ok((Self { r, s, odd_y_parity }, chain_id)) } /// Output the length of the signature without the length of the RLP header @@ -201,6 +196,24 @@ impl Signature { } } +/// Outputs (odd_y_parity, chain_id) from the `v` value. +/// This doesn't check validity of the `v` value for optimism. 
+#[inline] +pub fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> { + if v < 35 { + // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity + if v != 27 && v != 28 { + return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) + } + Ok((v == 28, None)) + } else { + // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 + let odd_y_parity = ((v - 35) % 2) != 0; + let chain_id = (v - 35) >> 1; + Ok((odd_y_parity, Some(chain_id))) + } +} + #[cfg(test)] mod tests { use crate::{transaction::signature::SECP256K1N_HALF, Address, Signature, B256, U256}; From 593b2b6d04c6f8aac9c3e9cca8555ae738964f03 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 29 Apr 2024 22:20:51 +0200 Subject: [PATCH 114/250] feat(rlp): custom rlp encoding tx type (#7968) --- crates/primitives/src/receipt.rs | 5 +- crates/primitives/src/transaction/tx_type.rs | 55 ++++++++++++++++++++ 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 2a25b2de8..63955a1d1 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -2,7 +2,7 @@ use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; use crate::{logs_bloom, Bloom, Bytes, PruneSegmentError, TxType, B256}; use alloy_primitives::Log; -use alloy_rlp::{length_of_length, Decodable, Encodable}; +use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; #[cfg(any(test, feature = "arbitrary"))] use proptest::strategy::Strategy; @@ -18,7 +18,8 @@ use std::{ #[cfg_attr(feature = "zstd-codec", main_codec(no_arbitrary, zstd))] #[cfg_attr(not(feature = "zstd-codec"), main_codec(no_arbitrary))] #[add_arbitrary_tests] -#[derive(Clone, Debug, PartialEq, Eq, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable)] +#[rlp(trailing)] pub struct Receipt { /// Receipt type. 
pub tx_type: TxType, diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 11df417d4..d203ecf77 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,4 +1,5 @@ use crate::{U64, U8}; +use alloy_rlp::{Decodable, Encodable}; use bytes::Buf; use reth_codecs::{derive_arbitrary, Compact}; use serde::{Deserialize, Serialize}; @@ -181,8 +182,30 @@ impl PartialEq for u8 { } } +impl Encodable for TxType { + fn encode(&self, out: &mut dyn bytes::BufMut) { + (*self as u8).encode(out); + } + + fn length(&self) -> usize { + 1 + } +} + +impl Decodable for TxType { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let ty = u8::decode(buf)?; + + TxType::try_from(ty).map_err(alloy_rlp::Error::Custom) + } +} + #[cfg(test)] mod tests { + use rand::Rng; + + use crate::hex; + use super::*; #[test] @@ -249,4 +272,36 @@ mod tests { ); } } + + #[test] + fn decode_tx_type() { + // Test for Legacy transaction + let tx_type = TxType::decode(&mut &hex!("80")[..]).unwrap(); + assert_eq!(tx_type, TxType::Legacy); + + // Test for EIP2930 transaction + let tx_type = TxType::decode(&mut &[1u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip2930); + + // Test for EIP1559 transaction + let tx_type = TxType::decode(&mut &[2u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip1559); + + // Test for EIP4844 transaction + let tx_type = TxType::decode(&mut &[3u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip4844); + + // Test random byte not in range + let buf = [rand::thread_rng().gen_range(4..=u8::MAX)]; + println!("{buf:?}"); + assert!(TxType::decode(&mut &buf[..]).is_err()); + + // Test for Deposit transaction + #[cfg(feature = "optimism")] + { + let buf = [126u8]; + let tx_type = TxType::decode(&mut &buf[..]).unwrap(); + assert_eq!(tx_type, TxType::Deposit); + } + } } From 6619faf42b7f2706d7bbf8df804e2c21139b1072 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 29 Apr 
2024 23:44:44 +0200 Subject: [PATCH 115/250] docs: correct `encode_for_signing` docs (#7973) --- crates/primitives/src/transaction/eip1559.rs | 2 +- crates/primitives/src/transaction/eip2930.rs | 2 +- crates/primitives/src/transaction/eip4844.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 68da7d8d9..06cbc129c 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -193,7 +193,7 @@ impl TxEip1559 { self.input.len() // input } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-1559 transaction in RLP for signing. /// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, to, diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index 86794a512..b0d1291e8 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -157,7 +157,7 @@ impl TxEip2930 { TxType::Eip2930 } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-2930 transaction in RLP for signing. /// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, gas_price, gas_limit, to, value, input, access_list)` diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 0a3790abe..8356d6788 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -291,7 +291,7 @@ impl TxEip4844 { TxType::Eip4844 } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-4844 transaction in RLP for signing. 
/// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, to, From d9faaa80cfb58527d160dc1d65ca44ee851f5cea Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 29 Apr 2024 23:45:21 +0200 Subject: [PATCH 116/250] chore: rm unused consensus fns (#7972) --- crates/consensus/common/src/validation.rs | 238 +--------------------- 1 file changed, 6 insertions(+), 232 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 06b2303a8..c6e4e0aee 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -7,12 +7,9 @@ use reth_primitives::{ eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, MAXIMUM_EXTRA_DATA_SIZE, }, - BlockNumber, ChainSpec, GotExpected, Hardfork, Header, InvalidTransactionError, SealedBlock, - SealedHeader, Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, - TxLegacy, + ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader, }; -use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider}; -use std::collections::{hash_map::Entry, HashMap}; +use reth_provider::{HeaderProvider, WithdrawalsProvider}; /// Validate header standalone pub fn validate_header_standalone( @@ -59,148 +56,6 @@ pub fn validate_header_standalone( Ok(()) } -/// Validate a transaction with regard to a block header. -/// -/// The only parameter from the header that affects the transaction is `base_fee`. -pub fn validate_transaction_regarding_header( - transaction: &Transaction, - chain_spec: &ChainSpec, - at_block_number: BlockNumber, - at_timestamp: u64, - base_fee: Option, -) -> Result<(), ConsensusError> { - #[allow(unreachable_patterns)] - let chain_id = match transaction { - Transaction::Legacy(TxLegacy { chain_id, .. 
}) => { - // EIP-155: Simple replay attack protection: https://eips.ethereum.org/EIPS/eip-155 - if !chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(at_block_number) && - chain_id.is_some() - { - return Err(InvalidTransactionError::OldLegacyChainId.into()) - } - *chain_id - } - Transaction::Eip2930(TxEip2930 { chain_id, .. }) => { - // EIP-2930: Optional access lists: https://eips.ethereum.org/EIPS/eip-2930 (New transaction type) - if !chain_spec.fork(Hardfork::Berlin).active_at_block(at_block_number) { - return Err(InvalidTransactionError::Eip2930Disabled.into()) - } - Some(*chain_id) - } - Transaction::Eip1559(TxEip1559 { - chain_id, - max_fee_per_gas, - max_priority_fee_per_gas, - .. - }) => { - // EIP-1559: Fee market change for ETH 1.0 chain https://eips.ethereum.org/EIPS/eip-1559 - if !chain_spec.fork(Hardfork::London).active_at_block(at_block_number) { - return Err(InvalidTransactionError::Eip1559Disabled.into()) - } - - // EIP-1559: add more constraints to the tx validation - // https://github.com/ethereum/EIPs/pull/3594 - if max_priority_fee_per_gas > max_fee_per_gas { - return Err(InvalidTransactionError::TipAboveFeeCap.into()) - } - - Some(*chain_id) - } - Transaction::Eip4844(TxEip4844 { - chain_id, - max_fee_per_gas, - max_priority_fee_per_gas, - .. 
- }) => { - // EIP-4844: Shard Blob Transactions https://eips.ethereum.org/EIPS/eip-4844 - if !chain_spec.is_cancun_active_at_timestamp(at_timestamp) { - return Err(InvalidTransactionError::Eip4844Disabled.into()) - } - - // EIP-1559: add more constraints to the tx validation - // https://github.com/ethereum/EIPs/pull/3594 - if max_priority_fee_per_gas > max_fee_per_gas { - return Err(InvalidTransactionError::TipAboveFeeCap.into()) - } - - Some(*chain_id) - } - _ => { - // Op Deposit - None - } - }; - if let Some(chain_id) = chain_id { - if chain_id != chain_spec.chain().id() { - return Err(InvalidTransactionError::ChainIdMismatch.into()) - } - } - // Check basefee and few checks that are related to that. - // https://github.com/ethereum/EIPs/pull/3594 - if let Some(base_fee_per_gas) = base_fee { - if transaction.max_fee_per_gas() < base_fee_per_gas as u128 { - return Err(InvalidTransactionError::FeeCapTooLow.into()) - } - } - - Ok(()) -} - -/// Iterate over all transactions, validate them against each other and against the block. -/// There is no gas check done as [REVM](https://github.com/bluealloy/revm/blob/fd0108381799662098b7ab2c429ea719d6dfbf28/crates/revm/src/evm_impl.rs#L113-L131) already checks that. -pub fn validate_all_transaction_regarding_block_and_nonces< - 'a, - Provider: HeaderProvider + AccountReader, ->( - transactions: impl Iterator, - header: &Header, - provider: Provider, - chain_spec: &ChainSpec, -) -> RethResult<()> { - let mut account_nonces = HashMap::new(); - - for transaction in transactions { - validate_transaction_regarding_header( - transaction, - chain_spec, - header.number, - header.timestamp, - header.base_fee_per_gas, - )?; - - // Get nonce, if there is previous transaction from same sender we need - // to take that nonce. 
- let nonce = match account_nonces.entry(transaction.signer()) { - Entry::Occupied(mut entry) => { - let nonce = *entry.get(); - *entry.get_mut() += 1; - nonce - } - Entry::Vacant(entry) => { - let account = provider.basic_account(transaction.signer())?.unwrap_or_default(); - // Signer account shouldn't have bytecode. Presence of bytecode means this is a - // smartcontract. - if account.has_bytecode() { - return Err(ConsensusError::from( - InvalidTransactionError::SignerAccountHasBytecode, - ) - .into()) - } - let nonce = account.nonce; - entry.insert(account.nonce + 1); - nonce - } - }; - - // check nonce - if transaction.nonce() != nonce { - return Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - } - } - - Ok(()) -} - /// Validate a block without regard for state: /// /// - Compares the ommer hash in the block header to the block body @@ -345,10 +200,11 @@ mod tests { test_utils::generators::{self, Rng}, }; use reth_primitives::{ - hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, Bytes, - ChainSpecBuilder, Signature, TransactionSigned, TxKind, Withdrawal, Withdrawals, MAINNET, - U256, + hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, + BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844, + TxKind, Withdrawal, Withdrawals, U256, }; + use reth_provider::AccountReader; use std::ops::RangeBounds; mock! 
{ @@ -382,15 +238,6 @@ mod tests { withdrawals_provider: MockWithdrawalsProvider::new(), } } - /// New provider where is_known is always true - fn new_known() -> Self { - Self { - is_known: true, - parent: None, - account: None, - withdrawals_provider: MockWithdrawalsProvider::new(), - } - } } impl AccountReader for Provider { @@ -457,25 +304,6 @@ mod tests { } } - fn mock_tx(nonce: u64) -> TransactionSignedEcRecovered { - let request = Transaction::Eip2930(TxEip2930 { - chain_id: 1u64, - nonce, - gas_price: 0x28f000fff, - gas_limit: 10, - to: TxKind::Call(Address::default()), - value: U256::from(3_u64), - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - }); - - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - let signer = Address::ZERO; - TransactionSignedEcRecovered::from_signed_transaction(tx, signer) - } - fn mock_blob_tx(nonce: u64, num_blobs: usize) -> TransactionSigned { let mut rng = generators::rng(); let request = Transaction::Eip4844(TxEip4844 { @@ -539,60 +367,6 @@ mod tests { (SealedBlock { header: header.seal_slow(), body, ommers, withdrawals: None }, parent) } - #[test] - fn sanity_tx_nonce_check() { - let (block, _) = mock_block(); - let tx1 = mock_tx(0); - let tx2 = mock_tx(1); - let provider = Provider::new_known(); - - let txs = vec![tx1, tx2]; - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ) - .expect("To Pass"); - } - - #[test] - fn nonce_gap_in_first_transaction() { - let (block, _) = mock_block(); - let tx1 = mock_tx(1); - let provider = Provider::new_known(); - - let txs = vec![tx1]; - assert_eq!( - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ), - Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - ) - } - - #[test] - fn 
nonce_gap_on_second_tx_from_same_signer() { - let (block, _) = mock_block(); - let tx1 = mock_tx(0); - let tx2 = mock_tx(3); - let provider = Provider::new_known(); - - let txs = vec![tx1, tx2]; - assert_eq!( - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ), - Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - ); - } - #[test] fn valid_withdrawal_index() { let chain_spec = ChainSpecBuilder::mainnet().shanghai_activated().build(); From 4c01856e6d0c35423a7ccb9727961f6ecbb7883a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 29 Apr 2024 23:45:43 +0200 Subject: [PATCH 117/250] chore: pool type must be unpin (#7974) --- crates/node/api/src/node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index db4bdd961..2eb14011f 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -83,7 +83,7 @@ where /// Encapsulates all types and components of the node. pub trait FullNodeComponents: FullNodeTypes + 'static { /// The transaction pool of the node. - type Pool: TransactionPool; + type Pool: TransactionPool + Unpin; /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; From 490fbb0f52e0b73eb65076912d42875093074694 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 01:32:08 +0200 Subject: [PATCH 118/250] docs: minor doc nits in reth basic payload (#7977) --- crates/payload/basic/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 6aa3ccbc1..c32961c72 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -53,9 +53,9 @@ mod metrics; pub struct BasicPayloadJobGenerator { /// The client that can interact with the chain. client: Client, - /// txpool + /// The transaction pool to pull transactions from. 
pool: Pool, - /// How to spawn building tasks + /// The task executor to spawn payload building tasks on. executor: Tasks, /// The configuration for the job generator. config: BasicPayloadJobGeneratorConfig, From 9fc75c624783fe6fad4dd97dedd0ef152ddb234c Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 30 Apr 2024 10:55:34 +0200 Subject: [PATCH 119/250] chore: pin ethereum tests (#7986) --- .github/workflows/unit.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 91a247fac..05ff09609 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -57,6 +57,7 @@ jobs: uses: actions/checkout@v4 with: repository: ethereum/tests + ref: 1c23e3c27ac53b794de0844d2d5e19cd2495b9d8 path: testing/ef-tests/ethereum-tests submodules: recursive fetch-depth: 1 From d281699c2a4df0c61d9459f11ec7157095dabee7 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 10:55:50 +0200 Subject: [PATCH 120/250] chore: rm clap as dep of `reth-static-file` (#7980) --- Cargo.lock | 1 - bin/reth/Cargo.toml | 6 ++++-- crates/static-file/Cargo.toml | 2 -- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d20f04fa8..b283e8eaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7749,7 +7749,6 @@ name = "reth-static-file" version = "0.2.0-beta.6" dependencies = [ "assert_matches", - "clap", "parking_lot 0.12.2", "rayon", "reth-db", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 995b29610..c323017d0 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -43,7 +43,7 @@ reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-discv4.workspace = true reth-prune.workspace = true -reth-static-file = { workspace = true, features = ["clap"] } +reth-static-file = { workspace = true } reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-node-api.workspace = true @@ -79,7 +79,9 @@ 
rand.workspace = true # tui comfy-table = "7.0" crossterm = "0.27.0" -ratatui = { version = "0.26", default-features = false, features = ["crossterm"] } +ratatui = { version = "0.26", default-features = false, features = [ + "crossterm", +] } human_bytes = "0.4.1" # async diff --git a/crates/static-file/Cargo.toml b/crates/static-file/Cargo.toml index b3fc1b93d..1345b2f23 100644 --- a/crates/static-file/Cargo.toml +++ b/crates/static-file/Cargo.toml @@ -25,7 +25,6 @@ tokio-stream.workspace = true # misc tracing.workspace = true -clap = { workspace = true, features = ["derive"], optional = true } rayon.workspace = true parking_lot = { workspace = true, features = ["send_guard", "arc_lock"] } @@ -37,4 +36,3 @@ assert_matches.workspace = true tempfile.workspace = true [features] -clap = ["dep:clap"] From 5d795b9342aef986adc7e6fcaf0b053104675fcf Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 30 Apr 2024 12:02:51 +0200 Subject: [PATCH 121/250] fix: handle Multiplex P2PStream.poll_ready errors (#7988) --- crates/net/eth-wire/src/multiplex.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 27d0f0a00..8677ae77c 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -19,6 +19,7 @@ use std::{ use crate::{ capability::{Capability, SharedCapabilities, SharedCapability, UnsupportedCapabilityError}, errors::{EthStreamError, P2PStreamError}, + p2pstream::DisconnectP2P, CanDisconnect, DisconnectReason, EthStream, P2PStream, Status, UnauthedEthStream, }; use bytes::{Bytes, BytesMut}; @@ -465,7 +466,7 @@ where let mut conn_ready = true; loop { match this.inner.conn.poll_ready_unpin(cx) { - Poll::Ready(_) => { + Poll::Ready(Ok(())) => { if let Some(msg) = this.inner.out_buffer.pop_front() { if let Err(err) = this.inner.conn.start_send_unpin(msg) { return Poll::Ready(Some(Err(err.into()))) @@ -474,6 +475,14 @@ where break 
} } + Poll::Ready(Err(err)) => { + if let Err(disconnect_err) = + this.inner.conn.start_disconnect(DisconnectReason::DisconnectRequested) + { + return Poll::Ready(Some(Err(disconnect_err.into()))); + } + return Poll::Ready(Some(Err(err.into()))); + } Poll::Pending => { conn_ready = false; break From 053b14abdce7f3fe6cd205f6b34c3012e8a0d46e Mon Sep 17 00:00:00 2001 From: Jacob Kaufmann Date: Tue, 30 Apr 2024 04:12:11 -0600 Subject: [PATCH 122/250] feat: add method to EthBuiltPayload to get blob sidecars (#7979) Co-authored-by: Matthias Seitz --- crates/ethereum/engine-primitives/src/payload.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index a354e0588..264355ac2 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -57,6 +57,11 @@ impl EthBuiltPayload { self.fees } + /// Returns the blob sidecars. + pub fn sidecars(&self) -> &[BlobTransactionSidecar] { + &self.sidecars + } + /// Adds sidecars to the payload. 
pub fn extend_sidecars(&mut self, sidecars: Vec) { self.sidecars.extend(sidecars) From 1fe00a7c359d1b08bb381ef375f91d546d9da582 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 30 Apr 2024 12:23:02 +0200 Subject: [PATCH 123/250] feat: use `FnOnce` for node hooks (#7975) Co-authored-by: Roman Krasiuk --- crates/node/builder/src/builder/states.rs | 12 +++++++----- crates/node/builder/src/hooks.rs | 20 +++++++++---------- crates/node/builder/src/launch/mod.rs | 3 +-- crates/node/builder/src/rpc.rs | 24 +++++++++++++---------- 4 files changed, 32 insertions(+), 27 deletions(-) diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 975590c5f..b77588df4 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -157,7 +157,7 @@ impl> NodeBuilderWithComponents(mut self, hook: F) -> Self where - F: Fn(NodeAdapter) -> eyre::Result<()> + Send + 'static, + F: FnOnce(NodeAdapter) -> eyre::Result<()> + Send + 'static, { self.add_ons.hooks.set_on_component_initialized(hook); self @@ -166,7 +166,7 @@ impl> NodeBuilderWithComponents(mut self, hook: F) -> Self where - F: Fn(FullNode>) -> eyre::Result<()> + Send + 'static, + F: FnOnce(FullNode>) -> eyre::Result<()> + Send + 'static, { self.add_ons.hooks.set_on_node_started(hook); self @@ -175,7 +175,7 @@ impl> NodeBuilderWithComponents(mut self, hook: F) -> Self where - F: Fn( + F: FnOnce( RpcContext<'_, NodeAdapter>, RethRpcServerHandles, ) -> eyre::Result<()> @@ -189,7 +189,9 @@ impl> NodeBuilderWithComponents(mut self, hook: F) -> Self where - F: Fn(RpcContext<'_, NodeAdapter>) -> eyre::Result<()> + Send + 'static, + F: FnOnce(RpcContext<'_, NodeAdapter>) -> eyre::Result<()> + + Send + + 'static, { self.add_ons.rpc.set_extend_rpc_modules(hook); self @@ -202,7 +204,7 @@ impl> NodeBuilderWithComponents(mut self, exex_id: impl Into, exex: F) -> Self where - F: Fn(ExExContext>) -> R + Send + 'static, + F: FnOnce(ExExContext>) -> R 
+ Send + 'static, R: Future> + Send, E: Future> + Send, { diff --git a/crates/node/builder/src/hooks.rs b/crates/node/builder/src/hooks.rs index 9d2127f5a..468c84e85 100644 --- a/crates/node/builder/src/hooks.rs +++ b/crates/node/builder/src/hooks.rs @@ -77,15 +77,15 @@ pub trait OnComponentInitializedHook: Send { /// Consumes the event hook and runs it. /// /// If this returns an error, the node launch will be aborted. - fn on_event(&self, node: Node) -> eyre::Result<()>; + fn on_event(self: Box, node: Node) -> eyre::Result<()>; } impl OnComponentInitializedHook for F where - F: Fn(Node) -> eyre::Result<()> + Send, + F: FnOnce(Node) -> eyre::Result<()> + Send, { - fn on_event(&self, node: Node) -> eyre::Result<()> { - self(node) + fn on_event(self: Box, node: Node) -> eyre::Result<()> { + (*self)(node) } } @@ -94,27 +94,27 @@ pub trait OnNodeStartedHook: Send { /// Consumes the event hook and runs it. /// /// If this returns an error, the node launch will be aborted. - fn on_event(&self, node: FullNode) -> eyre::Result<()>; + fn on_event(self: Box, node: FullNode) -> eyre::Result<()>; } impl OnNodeStartedHook for F where Node: FullNodeComponents, - F: Fn(FullNode) -> eyre::Result<()> + Send, + F: FnOnce(FullNode) -> eyre::Result<()> + Send, { - fn on_event(&self, node: FullNode) -> eyre::Result<()> { - self(node) + fn on_event(self: Box, node: FullNode) -> eyre::Result<()> { + (*self)(node) } } impl OnComponentInitializedHook for () { - fn on_event(&self, _node: Node) -> eyre::Result<()> { + fn on_event(self: Box, _node: Node) -> eyre::Result<()> { Ok(()) } } impl OnNodeStartedHook for () { - fn on_event(&self, _node: FullNode) -> eyre::Result<()> { + fn on_event(self: Box, _node: FullNode) -> eyre::Result<()> { Ok(()) } } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 39c549e06..4f1f00e4e 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -232,8 +232,7 @@ where async 
move { while let Ok(notification) = canon_state_notifications.recv().await { handle.send_async(notification.into()).await.expect( - "blockchain tree notification could not be sent to exex -manager", + "blockchain tree notification could not be sent to exex manager", ); } }, diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 3efeba7f5..a65dcfce5 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -98,7 +98,7 @@ impl fmt::Debug for RpcHooks { pub trait OnRpcStarted: Send { /// The hook that is called once the rpc server is started. fn on_rpc_started( - &self, + self: Box, ctx: RpcContext<'_, Node>, handles: RethRpcServerHandles, ) -> eyre::Result<()>; @@ -106,20 +106,24 @@ pub trait OnRpcStarted: Send { impl OnRpcStarted for F where - F: Fn(RpcContext<'_, Node>, RethRpcServerHandles) -> eyre::Result<()> + Send, + F: FnOnce(RpcContext<'_, Node>, RethRpcServerHandles) -> eyre::Result<()> + Send, Node: FullNodeComponents, { fn on_rpc_started( - &self, + self: Box, ctx: RpcContext<'_, Node>, handles: RethRpcServerHandles, ) -> eyre::Result<()> { - self(ctx, handles) + (*self)(ctx, handles) } } impl OnRpcStarted for () { - fn on_rpc_started(&self, _: RpcContext<'_, Node>, _: RethRpcServerHandles) -> eyre::Result<()> { + fn on_rpc_started( + self: Box, + _: RpcContext<'_, Node>, + _: RethRpcServerHandles, + ) -> eyre::Result<()> { Ok(()) } } @@ -127,21 +131,21 @@ impl OnRpcStarted for () { /// Event hook that is called when the rpc server is started. pub trait ExtendRpcModules: Send { /// The hook that is called once the rpc server is started. 
- fn extend_rpc_modules(&self, ctx: RpcContext<'_, Node>) -> eyre::Result<()>; + fn extend_rpc_modules(self: Box, ctx: RpcContext<'_, Node>) -> eyre::Result<()>; } impl ExtendRpcModules for F where - F: Fn(RpcContext<'_, Node>) -> eyre::Result<()> + Send, + F: FnOnce(RpcContext<'_, Node>) -> eyre::Result<()> + Send, Node: FullNodeComponents, { - fn extend_rpc_modules(&self, ctx: RpcContext<'_, Node>) -> eyre::Result<()> { - self(ctx) + fn extend_rpc_modules(self: Box, ctx: RpcContext<'_, Node>) -> eyre::Result<()> { + (*self)(ctx) } } impl ExtendRpcModules for () { - fn extend_rpc_modules(&self, _: RpcContext<'_, Node>) -> eyre::Result<()> { + fn extend_rpc_modules(self: Box, _: RpcContext<'_, Node>) -> eyre::Result<()> { Ok(()) } } From b3d7b7d501d7a6b81da9c1f9643c5ec55786bb96 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 30 Apr 2024 12:30:04 +0200 Subject: [PATCH 124/250] feat: `StaticFileProviderFactory` (#7983) --- bin/reth/src/commands/db/clear.rs | 2 +- bin/reth/src/commands/db/get.rs | 1 + bin/reth/src/commands/debug_cmd/execution.rs | 5 ++++- .../src/commands/debug_cmd/in_memory_merkle.rs | 3 ++- bin/reth/src/commands/debug_cmd/replay_engine.rs | 5 ++++- bin/reth/src/commands/import.rs | 2 +- bin/reth/src/commands/stage/drop.rs | 2 +- bin/reth/src/commands/stage/run.rs | 4 +++- bin/reth/src/commands/stage/unwind.rs | 1 + crates/blockchain-tree/src/externals.rs | 2 +- crates/consensus/beacon/src/engine/sync.rs | 2 +- crates/consensus/beacon/src/engine/test_utils.rs | 1 + crates/node-core/src/init.rs | 2 +- crates/node/builder/src/launch/common.rs | 2 +- crates/prune/src/pruner.rs | 4 +++- crates/stages-api/src/pipeline/mod.rs | 1 + crates/stages/src/lib.rs | 1 + crates/stages/src/sets.rs | 1 + crates/stages/src/stages/bodies.rs | 4 +++- crates/stages/src/stages/execution.rs | 5 ++++- crates/stages/src/stages/headers.rs | 4 +++- crates/stages/src/stages/merkle.rs | 2 +- crates/stages/src/stages/sender_recovery.rs | 5 ++++- 
crates/stages/src/stages/tx_lookup.rs | 2 +- crates/stages/src/test_utils/macros.rs | 10 +++++----- crates/stages/src/test_utils/test_db.rs | 2 +- crates/static-file/src/static_file_producer.rs | 2 +- .../storage/provider/src/providers/database/mod.rs | 14 ++++++++------ crates/storage/provider/src/providers/mod.rs | 10 ++++++++-- .../provider/src/providers/state/historical.rs | 1 + crates/storage/provider/src/traits/full.rs | 3 +++ crates/storage/provider/src/traits/mod.rs | 3 +++ .../provider/src/traits/static_file_provider.rs | 7 +++++++ 33 files changed, 82 insertions(+), 33 deletions(-) create mode 100644 crates/storage/provider/src/traits/static_file_provider.rs diff --git a/bin/reth/src/commands/db/clear.rs b/bin/reth/src/commands/db/clear.rs index a7c32cac1..f985be8ab 100644 --- a/bin/reth/src/commands/db/clear.rs +++ b/bin/reth/src/commands/db/clear.rs @@ -7,7 +7,7 @@ use reth_db::{ TableViewer, Tables, }; use reth_primitives::{static_file::find_fixed_range, StaticFileSegment}; -use reth_provider::ProviderFactory; +use reth_provider::{ProviderFactory, StaticFileProviderFactory}; /// The arguments for the `reth db clear` command #[derive(Parser, Debug)] diff --git a/bin/reth/src/commands/db/get.rs b/bin/reth/src/commands/db/get.rs index 958ced09f..80e3ae393 100644 --- a/bin/reth/src/commands/db/get.rs +++ b/bin/reth/src/commands/db/get.rs @@ -7,6 +7,7 @@ use reth_db::{ tables, RawKey, RawTable, Receipts, TableViewer, Transactions, }; use reth_primitives::{BlockHash, Header, StaticFileSegment}; +use reth_provider::StaticFileProviderFactory; use tracing::error; /// The arguments for the `reth db get` command diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 2384a9af0..df6b4d111 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -29,7 +29,10 @@ use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ fs, stage::StageId, 
BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, }; -use reth_provider::{BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader}; +use reth_provider::{ + BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, +}; use reth_stages::{ sets::DefaultStages, stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 3632f4cff..f13b503f1 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -21,7 +21,8 @@ use reth_node_ethereum::EthEvmConfig; use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec}; use reth_provider::{ AccountExtReader, ExecutorFactory, HashingWriter, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StorageReader, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StaticFileProviderFactory, + StorageReader, }; use reth_tasks::TaskExecutor; use reth_trie::{updates::TrieKey, StateRoot}; diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index d9b6e9865..f59af6218 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -24,7 +24,10 @@ use reth_node_core::engine_api_store::{EngineApiStore, StoredEngineApiMessage}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{fs, ChainSpec, PruneModes}; -use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory}; +use reth_provider::{ + providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory, + StaticFileProviderFactory, +}; use reth_revm::EvmProcessorFactory; use 
reth_stages::Pipeline; use reth_static_file::StaticFileProducer; diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 0136e0e5e..4731bf565 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -31,7 +31,7 @@ use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockNumReader, HeaderProvider, HeaderSyncMode, ProviderError, ProviderFactory, - StageCheckpointReader, + StageCheckpointReader, StaticFileProviderFactory, }; use reth_stages::{ prelude::*, diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index e79a4c33b..5c1440602 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -15,7 +15,7 @@ use reth_node_core::init::{insert_genesis_header, insert_genesis_history, insert use reth_primitives::{ fs, stage::StageId, static_file::find_fixed_range, ChainSpec, StaticFileSegment, }; -use reth_provider::{providers::StaticFileWriter, ProviderFactory}; +use reth_provider::{providers::StaticFileWriter, ProviderFactory, StaticFileProviderFactory}; use std::sync::Arc; /// `reth drop-stage` command diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 32550718f..66fb25b47 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -21,7 +21,9 @@ use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; use reth_exex::ExExManagerHandle; use reth_node_ethereum::EthEvmConfig; use reth_primitives::ChainSpec; -use reth_provider::{ProviderFactory, StageCheckpointReader, StageCheckpointWriter}; +use reth_provider::{ + ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, +}; use reth_stages::{ stages::{ AccountHashingStage, BodyStage, ExecutionStage, ExecutionStageThresholds, diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs 
index 2682683d4..9ffaad979 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -25,6 +25,7 @@ use reth_node_ethereum::EthEvmConfig; use reth_primitives::{BlockHashOrNumber, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, + StaticFileProviderFactory, }; use reth_prune::PrunerBuilder; use reth_stages::{ diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index c3bda1ae2..36f304173 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -6,7 +6,7 @@ use reth_db::{ }; use reth_interfaces::RethResult; use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; -use reth_provider::{ProviderFactory, StatsReader}; +use reth_provider::{ProviderFactory, StaticFileProviderFactory, StatsReader}; use std::{collections::BTreeMap, sync::Arc}; /// A container for external components. 
diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 96163e996..9e206176a 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -437,7 +437,7 @@ mod tests { }; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, - BundleStateWithReceipts, + BundleStateWithReceipts, StaticFileProviderFactory, }; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_static_file::StaticFileProducer; diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 67225b7c7..513987e75 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -27,6 +27,7 @@ use reth_provider::{ providers::BlockchainProvider, test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, BundleStateWithReceipts, ExecutorFactory, HeaderSyncMode, PrunableBlockExecutor, + StaticFileProviderFactory, }; use reth_prune::Pruner; use reth_revm::EvmProcessorFactory; diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index eb513cc40..883bb437a 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -15,7 +15,7 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, - ProviderFactory, + ProviderFactory, StaticFileProviderFactory, }; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; use serde::{Deserialize, Serialize}; diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index c57e12cf6..28453a047 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs 
@@ -17,7 +17,7 @@ use reth_node_core::{ node_config::NodeConfig, }; use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, PruneModes, B256}; -use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; use reth_prune::PrunerBuilder; use reth_rpc::JwtSecret; use reth_static_file::StaticFileProducer; diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index f3bf963e0..55a998709 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -10,7 +10,9 @@ use reth_primitives::{ BlockNumber, FinishedExExHeight, PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, StaticFileSegment, }; -use reth_provider::{DatabaseProviderRW, ProviderFactory, PruneCheckpointReader}; +use reth_provider::{ + DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, StaticFileProviderFactory, +}; use reth_tokio_util::EventListeners; use std::{ collections::BTreeMap, diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 1b455a939..89bb9107d 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -13,6 +13,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::StaticFileWriter, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, + StaticFileProviderFactory, }; use reth_static_file::StaticFileProducer; use reth_tokio_util::EventListeners; diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 3fea3e04d..92c2b3a09 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -24,6 +24,7 @@ //! # use tokio::sync::watch; //! # use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::ProviderFactory; +//! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::HeaderSyncMode; //! # use reth_provider::test_utils::create_test_provider_factory; //! 
# use reth_static_file::StaticFileProducer; diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 4f04e9b10..99edf05b7 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -15,6 +15,7 @@ //! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PruneModes, MAINNET}; //! # use reth_evm_ethereum::EthEvmConfig; +//! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index 6dfe7a6a8..5080b9b9e 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -381,6 +381,7 @@ mod tests { use assert_matches::assert_matches; use reth_primitives::stage::StageUnitCheckpoint; + use reth_provider::StaticFileProviderFactory; use test_utils::*; use crate::test_utils::{ @@ -632,7 +633,8 @@ mod tests { StaticFileSegment, TxNumber, B256, }; use reth_provider::{ - providers::StaticFileWriter, HeaderProvider, ProviderFactory, TransactionsProvider, + providers::StaticFileWriter, HeaderProvider, ProviderFactory, + StaticFileProviderFactory, TransactionsProvider, }; use reth_stages_api::{ExecInput, ExecOutput, UnwindInput}; diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 6fb6f58e7..1771e2570 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -616,7 +616,10 @@ mod tests { Bytecode, ChainSpecBuilder, PruneMode, ReceiptsLogPruneConfig, SealedBlock, StorageEntry, B256, U256, }; - use reth_provider::{test_utils::create_test_provider_factory, AccountReader, ReceiptProvider}; + use reth_provider::{ + test_utils::create_test_provider_factory, AccountReader, ReceiptProvider, + StaticFileProviderFactory, + }; use reth_revm::EvmProcessorFactory; use std::collections::BTreeMap; diff --git 
a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index 548048dd7..f0a8c1811 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -387,7 +387,9 @@ mod tests { use reth_primitives::{ stage::StageUnitCheckpoint, BlockBody, SealedBlock, SealedBlockWithSenders, B256, }; - use reth_provider::{BlockWriter, BundleStateWithReceipts, ProviderFactory}; + use reth_provider::{ + BlockWriter, BundleStateWithReceipts, ProviderFactory, StaticFileProviderFactory, + }; use reth_trie::{updates::TrieUpdates, HashedPostState}; use test_runner::HeadersTestRunner; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 562cff183..186382e36 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -377,7 +377,7 @@ mod tests { use reth_primitives::{ keccak256, stage::StageUnitCheckpoint, SealedBlock, StaticFileSegment, StorageEntry, U256, }; - use reth_provider::providers::StaticFileWriter; + use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use reth_trie::test_utils::{state_root, state_root_prehashed}; use std::collections::BTreeMap; diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 5ddb2dfc0..e078fd954 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -292,7 +292,10 @@ mod tests { stage::StageUnitCheckpoint, BlockNumber, PruneCheckpoint, PruneMode, SealedBlock, TransactionSigned, B256, }; - use reth_provider::{providers::StaticFileWriter, PruneCheckpointWriter, TransactionsProvider}; + use reth_provider::{ + providers::StaticFileWriter, PruneCheckpointWriter, StaticFileProviderFactory, + TransactionsProvider, + }; use super::*; use crate::test_utils::{ diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 918be21c5..101c52258 100644 --- 
a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -248,7 +248,7 @@ mod tests { generators::{random_block, random_block_range}, }; use reth_primitives::{stage::StageUnitCheckpoint, BlockNumber, SealedBlock, B256}; - use reth_provider::providers::StaticFileWriter; + use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use std::ops::Sub; // Implement stage test suite. diff --git a/crates/stages/src/test_utils/macros.rs b/crates/stages/src/test_utils/macros.rs index 0ce346d70..11fb46cde 100644 --- a/crates/stages/src/test_utils/macros.rs +++ b/crates/stages/src/test_utils/macros.rs @@ -13,7 +13,7 @@ macro_rules! stage_test_suite { // Run stage execution let result = runner.execute(input).await; - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); // Check that the result is returned and the stage does not panic. // The return result with empty db is stage-specific. @@ -46,7 +46,7 @@ macro_rules! stage_test_suite { // Assert the successful result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, @@ -76,7 +76,7 @@ macro_rules! stage_test_suite { // Run stage unwind let rx = runner.unwind(input).await; - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( rx, @@ -110,7 +110,7 @@ macro_rules! 
stage_test_suite { // Assert the successful execution result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, @@ -179,7 +179,7 @@ macro_rules! stage_test_suite_ext { // Assert the successful result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index a080c9c8f..5fe65a737 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -18,7 +18,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProviderRWRefMut, StaticFileWriter}, - HistoryWriter, ProviderError, ProviderFactory, + HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, }; use std::{collections::BTreeMap, path::Path, sync::Arc}; use tempfile::TempDir; diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index 2af4f8cac..0b0720e21 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -255,7 +255,7 @@ mod tests { }; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - ProviderFactory, + ProviderFactory, StaticFileProviderFactory, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::{ diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index a2bf883d5..1e2f73cbc 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -5,7 +5,7 @@ use crate::{ 
BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_evm::ConfigureEvmEnv; @@ -69,11 +69,6 @@ impl ProviderFactory { &self.db } - /// Returns static file provider - pub fn static_file_provider(&self) -> StaticFileProvider { - self.static_file_provider.clone() - } - #[cfg(any(test, feature = "test-utils"))] /// Consumes Self and returns DB pub fn into_db(self) -> DB { @@ -161,6 +156,13 @@ impl DatabaseProviderFactory for ProviderFactory { } } +impl StaticFileProviderFactory for ProviderFactory { + /// Returns static file provider + fn static_file_provider(&self) -> StaticFileProvider { + self.static_file_provider.clone() + } +} + impl HeaderSyncGapProvider for ProviderFactory { fn sync_gap( &self, diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index c9ebd042c..f58f77dd0 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -4,8 +4,8 @@ use crate::{ CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, - StateProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, - WithdrawalsProvider, + StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + TreeViewer, WithdrawalsProvider, }; use reth_db::{ database::Database, @@ -142,6 +142,12 @@ where } } 
+impl StaticFileProviderFactory for BlockchainProvider { + fn static_file_provider(&self) -> StaticFileProvider { + self.database.static_file_provider() + } +} + impl HeaderProvider for BlockchainProvider where DB: Database, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index e87be25c9..ed64314aa 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -405,6 +405,7 @@ mod tests { providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, test_utils::create_test_provider_factory, AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, + StaticFileProviderFactory, }; use reth_db::{ models::{storage_sharded_key::StorageShardedKey, AccountBeforeTx, ShardedKey}, diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 78ef74085..9214cc273 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -3,12 +3,14 @@ use crate::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, + StaticFileProviderFactory, }; use reth_db::database::Database; /// Helper trait to unify all provider traits for simplicity. 
pub trait FullProvider: DatabaseProviderFactory + + StaticFileProviderFactory + BlockReaderIdExt + AccountReader + StateProviderFactory @@ -25,6 +27,7 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory + + StaticFileProviderFactory + BlockReaderIdExt + AccountReader + StateProviderFactory diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 44884acb0..6d78cf583 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -75,6 +75,9 @@ pub use prune_checkpoint::{PruneCheckpointReader, PruneCheckpointWriter}; mod database_provider; pub use database_provider::DatabaseProviderFactory; +mod static_file_provider; +pub use static_file_provider::StaticFileProviderFactory; + mod stats; pub use stats::StatsReader; diff --git a/crates/storage/provider/src/traits/static_file_provider.rs b/crates/storage/provider/src/traits/static_file_provider.rs new file mode 100644 index 000000000..24d695692 --- /dev/null +++ b/crates/storage/provider/src/traits/static_file_provider.rs @@ -0,0 +1,7 @@ +use crate::providers::StaticFileProvider; + +/// Static file provider factory. +pub trait StaticFileProviderFactory { + /// Create new instance of static file provider. 
+ fn static_file_provider(&self) -> StaticFileProvider; +} From 0f9658cfa17cc9dd97ccf8ee3b7ff855757c4b59 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 30 Apr 2024 13:27:56 +0200 Subject: [PATCH 125/250] chore: use `FnOnce` on `WithLaunchContext` methods (#7989) --- bin/reth/src/optimism.rs | 2 +- crates/node/builder/src/builder/mod.rs | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/bin/reth/src/optimism.rs b/bin/reth/src/optimism.rs index 0c0a483dd..a651314b8 100644 --- a/bin/reth/src/optimism.rs +++ b/bin/reth/src/optimism.rs @@ -31,7 +31,7 @@ fn main() { .node(OptimismNode::new(rollup_args.clone())) .extend_rpc_modules(move |ctx| { // register sequencer tx forwarder - if let Some(sequencer_http) = rollup_args.sequencer_http.clone() { + if let Some(sequencer_http) = rollup_args.sequencer_http { ctx.registry.set_eth_raw_transaction_forwarder(Arc::new(SequencerClient::new( sequencer_http, ))); diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 10977360a..7f898ca21 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -316,7 +316,7 @@ where /// Sets the hook that is run once the node's components are initialized. pub fn on_component_initialized(self, hook: F) -> Self where - F: Fn(NodeAdapter, CB::Components>) -> eyre::Result<()> + F: FnOnce(NodeAdapter, CB::Components>) -> eyre::Result<()> + Send + 'static, { @@ -330,7 +330,9 @@ where /// Sets the hook that is run once the node has started. pub fn on_node_started(self, hook: F) -> Self where - F: Fn(FullNode, CB::Components>>) -> eyre::Result<()> + F: FnOnce( + FullNode, CB::Components>>, + ) -> eyre::Result<()> + Send + 'static, { @@ -344,7 +346,7 @@ where /// Sets the hook that is run once the rpc server is started. 
pub fn on_rpc_started(self, hook: F) -> Self where - F: Fn( + F: FnOnce( RpcContext<'_, NodeAdapter, CB::Components>>, RethRpcServerHandles, ) -> eyre::Result<()> @@ -361,7 +363,7 @@ where /// Sets the hook that is run to configure the rpc modules. pub fn extend_rpc_modules(self, hook: F) -> Self where - F: Fn( + F: FnOnce( RpcContext<'_, NodeAdapter, CB::Components>>, ) -> eyre::Result<()> + Send @@ -381,7 +383,7 @@ where /// The ExEx ID must be unique. pub fn install_exex(self, exex_id: impl Into, exex: F) -> Self where - F: Fn(ExExContext, CB::Components>>) -> R + F: FnOnce(ExExContext, CB::Components>>) -> R + Send + 'static, R: Future> + Send, From c3cdd8c646f123c281c9035a180676b975a740d4 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 30 Apr 2024 13:00:18 +0100 Subject: [PATCH 126/250] feat(examples): sqlite rollup exex (#7826) --- Cargo.lock | 27 + Cargo.toml | 1 + crates/primitives/src/block.rs | 12 +- .../storage/provider/src/bundle_state/mod.rs | 2 +- .../src/bundle_state/state_reverts.rs | 6 +- examples/README.md | 1 + examples/exex/rollup/Cargo.toml | 38 ++ examples/exex/rollup/rollup_abi.json | 626 ++++++++++++++++++ examples/exex/rollup/src/db.rs | 460 +++++++++++++ examples/exex/rollup/src/main.rs | 586 ++++++++++++++++ 10 files changed, 1753 insertions(+), 6 deletions(-) create mode 100644 examples/exex/rollup/Cargo.toml create mode 100644 examples/exex/rollup/rollup_abi.json create mode 100644 examples/exex/rollup/src/db.rs create mode 100644 examples/exex/rollup/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index b283e8eaf..c7eca223b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2881,6 +2881,33 @@ dependencies = [ "tokio", ] +[[package]] +name = "exex-rollup" +version = "0.0.0" +dependencies = [ + "alloy-rlp", + "alloy-sol-types", + "eyre", + "futures", + "once_cell", + "reth", + "reth-cli-runner", + "reth-exex", + "reth-interfaces", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-provider", 
+ "reth-revm", + "reth-tracing", + "reth-trie", + "rusqlite", + "secp256k1", + "serde_json", + "tokio", +] + [[package]] name = "eyre" version = "0.6.12" diff --git a/Cargo.toml b/Cargo.toml index 70e36cd94..e7bf2ec5c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -89,6 +89,7 @@ members = [ "examples/custom-inspector/", "examples/exex/minimal/", "examples/exex/op-bridge/", + "examples/exex/rollup/", "examples/db-access", "testing/ef-tests/", "testing/testing-utils", diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 864e7954f..4b4831b9c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -49,7 +49,7 @@ pub struct Block { } impl Block { - /// Create SealedBLock that will create all header hashes. + /// Calculate the header hash and seal the block so that it can't be changed. pub fn seal_slow(self) -> SealedBlock { SealedBlock { header: self.header.seal_slow(), @@ -175,7 +175,7 @@ impl TryFrom for Block { .collect(), reth_rpc_types::BlockTransactions::Hashes(_) | reth_rpc_types::BlockTransactions::Uncle => { - return Err(ConversionError::MissingFullTransactions); + return Err(ConversionError::MissingFullTransactions) } }; transactions? @@ -214,6 +214,12 @@ impl BlockWithSenders { SealedBlockWithSenders { block: block.seal(hash), senders } } + /// Calculate the header hash and seal the block with senders so that it can't be changed. + #[inline] + pub fn seal_slow(self) -> SealedBlockWithSenders { + SealedBlockWithSenders { block: self.block.seal_slow(), senders: self.senders } + } + /// Split Structure to its components #[inline] pub fn into_components(self) -> (Block, Vec
) { @@ -456,7 +462,7 @@ impl std::ops::DerefMut for SealedBlock { } /// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct SealedBlockWithSenders { /// Sealed block pub block: SealedBlock, diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs index 3f5da6ec6..5df4a213a 100644 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ b/crates/storage/provider/src/bundle_state/mod.rs @@ -10,4 +10,4 @@ pub use bundle_state_with_receipts::{ }; pub use hashed_state_changes::HashedStateChanges; pub use state_changes::StateChanges; -pub use state_reverts::StateReverts; +pub use state_reverts::{StateReverts, StorageRevertsIter}; diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index e61572cf5..006f87b40 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -108,7 +108,8 @@ impl StateReverts { /// Iterator over storage reverts. /// See [StorageRevertsIter::next] for more details. -struct StorageRevertsIter { +#[allow(missing_debug_implementations)] +pub struct StorageRevertsIter { reverts: Peekable, wiped: Peekable, } @@ -118,7 +119,8 @@ where R: Iterator, W: Iterator, { - fn new( + /// Create a new iterator over storage reverts. + pub fn new( reverts: impl IntoIterator, wiped: impl IntoIterator, ) -> Self { diff --git a/examples/README.md b/examples/README.md index ea2c87c1b..0885aa294 100644 --- a/examples/README.md +++ b/examples/README.md @@ -27,6 +27,7 @@ to make a PR! 
| ---------------------------------- | --------------------------------------------------------------------------------- | | [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | | [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | +| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | ## RPC diff --git a/examples/exex/rollup/Cargo.toml b/examples/exex/rollup/Cargo.toml new file mode 100644 index 000000000..8d338c241 --- /dev/null +++ b/examples/exex/rollup/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "exex-rollup" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +# reth +reth.workspace = true +reth-cli-runner.workspace = true +reth-exex.workspace = true +reth-interfaces.workspace = true +reth-node-api.workspace = true +reth-node-core.workspace = true +reth-node-ethereum.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-tracing.workspace = true +reth-trie.workspace = true + +# async +tokio.workspace = true +futures.workspace = true + +# misc +alloy-sol-types = { workspace = true, features = ["json"] } +alloy-rlp.workspace = true +eyre.workspace = true +rusqlite = { version = "0.31.0", features = ["bundled"] } +serde_json.workspace = true +once_cell.workspace = true + +[dev-dependencies] +reth-interfaces = { workspace = true, features = ["test-utils"] } +secp256k1.workspace = true + diff --git a/examples/exex/rollup/rollup_abi.json b/examples/exex/rollup/rollup_abi.json new file mode 100644 index 000000000..08bc23f0e --- /dev/null +++ b/examples/exex/rollup/rollup_abi.json @@ -0,0 +1,626 @@ +[ + { + "inputs": [ + { "internalType": "address", "name": "admin", "type": "address" } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { "inputs": [], "name": "AccessControlBadConfirmation", "type": "error" }, 
+ { + "inputs": [ + { "internalType": "uint48", "name": "schedule", "type": "uint48" } + ], + "name": "AccessControlEnforcedDefaultAdminDelay", + "type": "error" + }, + { + "inputs": [], + "name": "AccessControlEnforcedDefaultAdminRules", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "defaultAdmin", + "type": "address" + } + ], + "name": "AccessControlInvalidDefaultAdmin", + "type": "error" + }, + { + "inputs": [ + { "internalType": "address", "name": "account", "type": "address" }, + { + "internalType": "bytes32", + "name": "neededRole", + "type": "bytes32" + } + ], + "name": "AccessControlUnauthorizedAccount", + "type": "error" + }, + { + "inputs": [ + { "internalType": "uint256", "name": "expected", "type": "uint256" } + ], + "name": "BadSequence", + "type": "error" + }, + { "inputs": [], "name": "BadSignature", "type": "error" }, + { "inputs": [], "name": "BlockExpired", "type": "error" }, + { + "inputs": [ + { + "internalType": "address", + "name": "sequencer", + "type": "address" + } + ], + "name": "NotSequencer", + "type": "error" + }, + { "inputs": [], "name": "OrderExpired", "type": "error" }, + { + "inputs": [ + { "internalType": "uint8", "name": "bits", "type": "uint8" }, + { "internalType": "uint256", "name": "value", "type": "uint256" } + ], + "name": "SafeCastOverflowedUintDowncast", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sequencer", + "type": "address" + }, + { + "components": [ + { + "internalType": "uint256", + "name": "rollupChainId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "sequence", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "confirmBy", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "address", + "name": "rewardAddress", + "type": "address" + } + ], + "indexed": true, + "internalType": 
"struct CalldataZenith.BlockHeader", + "name": "header", + "type": "tuple" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "blockData", + "type": "bytes" + } + ], + "name": "BlockSubmitted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [], + "name": "DefaultAdminDelayChangeCanceled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint48", + "name": "newDelay", + "type": "uint48" + }, + { + "indexed": false, + "internalType": "uint48", + "name": "effectSchedule", + "type": "uint48" + } + ], + "name": "DefaultAdminDelayChangeScheduled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [], + "name": "DefaultAdminTransferCanceled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "newAdmin", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint48", + "name": "acceptSchedule", + "type": "uint48" + } + ], + "name": "DefaultAdminTransferScheduled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "rollupRecipient", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "Enter", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "hostRecipient", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "ExitFilled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + 
"indexed": true, + "internalType": "bytes32", + "name": "previousAdminRole", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "newAdminRole", + "type": "bytes32" + } + ], + "name": "RoleAdminChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "RoleGranted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "role", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "RoleRevoked", + "type": "event" + }, + { "stateMutability": "payable", "type": "fallback" }, + { + "inputs": [], + "name": "DEFAULT_ADMIN_ROLE", + "outputs": [ + { "internalType": "bytes32", "name": "", "type": "bytes32" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "SEQUENCER_ROLE", + "outputs": [ + { "internalType": "bytes32", "name": "", "type": "bytes32" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "acceptDefaultAdminTransfer", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "address", "name": "newAdmin", "type": "address" } + ], + "name": "beginDefaultAdminTransfer", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "rollupChainId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "sequence", + "type": 
"uint256" + }, + { + "internalType": "uint256", + "name": "confirmBy", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "address", + "name": "rewardAddress", + "type": "address" + } + ], + "internalType": "struct CalldataZenith.BlockHeader", + "name": "header", + "type": "tuple" + }, + { "internalType": "bytes", "name": "blockData", "type": "bytes" } + ], + "name": "blockCommitment", + "outputs": [ + { "internalType": "bytes32", "name": "commit", "type": "bytes32" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "cancelDefaultAdminTransfer", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "uint48", "name": "newDelay", "type": "uint48" } + ], + "name": "changeDefaultAdminDelay", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "defaultAdmin", + "outputs": [ + { "internalType": "address", "name": "", "type": "address" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "defaultAdminDelay", + "outputs": [{ "internalType": "uint48", "name": "", "type": "uint48" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "defaultAdminDelayIncreaseWait", + "outputs": [{ "internalType": "uint48", "name": "", "type": "uint48" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "rollupRecipient", + "type": "address" + } + ], + "name": "enter", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + 
"internalType": "uint256", + "name": "deadline", + "type": "uint256" + } + ], + "internalType": "struct HostPassage.ExitOrder[]", + "name": "orders", + "type": "tuple[]" + } + ], + "name": "fulfillExits", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" } + ], + "name": "getRoleAdmin", + "outputs": [ + { "internalType": "bytes32", "name": "", "type": "bytes32" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" }, + { "internalType": "address", "name": "account", "type": "address" } + ], + "name": "grantRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" }, + { "internalType": "address", "name": "account", "type": "address" } + ], + "name": "hasRole", + "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { "internalType": "uint256", "name": "", "type": "uint256" } + ], + "name": "nextSequence", + "outputs": [ + { "internalType": "uint256", "name": "", "type": "uint256" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { "internalType": "address", "name": "", "type": "address" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pendingDefaultAdmin", + "outputs": [ + { + "internalType": "address", + "name": "newAdmin", + "type": "address" + }, + { "internalType": "uint48", "name": "schedule", "type": "uint48" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pendingDefaultAdminDelay", + "outputs": [ + { "internalType": "uint48", "name": "newDelay", "type": "uint48" }, + { "internalType": "uint48", "name": 
"schedule", "type": "uint48" } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" }, + { "internalType": "address", "name": "account", "type": "address" } + ], + "name": "renounceRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes32", "name": "role", "type": "bytes32" }, + { "internalType": "address", "name": "account", "type": "address" } + ], + "name": "revokeRole", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "rollbackDefaultAdminDelay", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "rollupChainId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "sequence", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "confirmBy", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "address", + "name": "rewardAddress", + "type": "address" + } + ], + "internalType": "struct CalldataZenith.BlockHeader", + "name": "header", + "type": "tuple" + }, + { "internalType": "bytes", "name": "blockData", "type": "bytes" }, + { "internalType": "uint8", "name": "v", "type": "uint8" }, + { "internalType": "bytes32", "name": "r", "type": "bytes32" }, + { "internalType": "bytes32", "name": "s", "type": "bytes32" } + ], + "name": "submitBlock", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes4", + "name": "interfaceId", + "type": "bytes4" + } + ], + "name": "supportsInterface", + "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], + "stateMutability": "view", + "type": "function" + }, + { "stateMutability": "payable", "type": "receive" } +] diff 
--git a/examples/exex/rollup/src/db.rs b/examples/exex/rollup/src/db.rs new file mode 100644 index 000000000..39c2b418b --- /dev/null +++ b/examples/exex/rollup/src/db.rs @@ -0,0 +1,460 @@ +use std::{ + collections::{hash_map::Entry, HashMap}, + str::FromStr, + sync::{Arc, Mutex, MutexGuard}, +}; + +use reth_primitives::{ + revm_primitives::{AccountInfo, Bytecode}, + Address, Bytes, SealedBlockWithSenders, StorageEntry, B256, U256, +}; +use reth_provider::{bundle_state::StorageRevertsIter, OriginalValuesKnown}; +use reth_revm::db::{ + states::{PlainStorageChangeset, PlainStorageRevert}, + BundleState, +}; +use rusqlite::Connection; + +/// Type used to initialize revms bundle state. +type BundleStateInit = + HashMap, Option, HashMap)>; + +/// Types used inside RevertsInit to initialize revms reverts. +pub type AccountRevertInit = (Option>, Vec); + +/// Type used to initialize revms reverts. +pub type RevertsInit = HashMap; + +pub struct Database { + connection: Arc>, +} + +impl Database { + /// Create new database with the provided connection. 
+ pub fn new(connection: Connection) -> eyre::Result { + let database = Self { connection: Arc::new(Mutex::new(connection)) }; + database.create_tables()?; + Ok(database) + } + + fn connection(&self) -> MutexGuard<'_, Connection> { + self.connection.lock().expect("failed to acquire database lock") + } + + fn create_tables(&self) -> eyre::Result<()> { + self.connection().execute_batch( + "CREATE TABLE IF NOT EXISTS block ( + id INTEGER PRIMARY KEY, + number TEXT UNIQUE, + data TEXT + ); + CREATE TABLE IF NOT EXISTS account ( + id INTEGER PRIMARY KEY, + address TEXT UNIQUE, + data TEXT + ); + CREATE TABLE IF NOT EXISTS account_revert ( + id INTEGER PRIMARY KEY, + block_number TEXT, + address TEXT, + data TEXT, + UNIQUE (block_number, address) + ); + CREATE TABLE IF NOT EXISTS storage ( + id INTEGER PRIMARY KEY, + address TEXT, + key TEXT, + data TEXT, + UNIQUE (address, key) + ); + CREATE TABLE IF NOT EXISTS storage_revert ( + id INTEGER PRIMARY KEY, + block_number TEXT, + address TEXT, + key TEXT, + data TEXT, + UNIQUE (block_number, address, key) + ); + CREATE TABLE IF NOT EXISTS bytecode ( + id INTEGER PRIMARY KEY, + hash TEXT UNIQUE, + data TEXT + );", + )?; + Ok(()) + } + + /// Insert block with bundle into the database. + pub fn insert_block_with_bundle( + &self, + block: &SealedBlockWithSenders, + bundle: BundleState, + ) -> eyre::Result<()> { + let mut connection = self.connection(); + let tx = connection.transaction()?; + + tx.execute( + "INSERT INTO block (number, data) VALUES (?, ?)", + (block.header.number.to_string(), serde_json::to_string(block)?), + )?; + + let (changeset, reverts) = bundle.into_plain_state_and_reverts(OriginalValuesKnown::Yes); + + for (address, account) in changeset.accounts { + if let Some(account) = account { + tx.execute( + "INSERT INTO account (address, data) VALUES (?, ?) 
ON CONFLICT(address) DO UPDATE SET data = excluded.data", + (address.to_string(), serde_json::to_string(&account)?), + )?; + } else { + tx.execute("DELETE FROM account WHERE address = ?", (address.to_string(),))?; + } + } + + if reverts.accounts.len() > 1 { + eyre::bail!("too many blocks in account reverts"); + } + for (address, account) in + reverts.accounts.first().ok_or(eyre::eyre!("no account reverts"))? + { + tx.execute( + "INSERT INTO account_revert (block_number, address, data) VALUES (?, ?, ?) ON CONFLICT(block_number, address) DO UPDATE SET data = excluded.data", + (block.header.number.to_string(), address.to_string(), serde_json::to_string(account)?), + )?; + } + + for PlainStorageChangeset { address, wipe_storage, storage } in changeset.storage { + if wipe_storage { + tx.execute("DELETE FROM storage WHERE address = ?", (address.to_string(),))?; + } + + for (key, data) in storage { + tx.execute( + "INSERT INTO storage (address, key, data) VALUES (?, ?, ?) ON CONFLICT(address, key) DO UPDATE SET data = excluded.data", + (address.to_string(), B256::from(key).to_string(), data.to_string()), + )?; + } + } + + if reverts.storage.len() > 1 { + eyre::bail!("too many blocks in storage reverts"); + } + for PlainStorageRevert { address, wiped, storage_revert } in + reverts.storage.into_iter().next().ok_or(eyre::eyre!("no storage reverts"))? + { + let storage = storage_revert + .into_iter() + .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) + .collect::>(); + let wiped_storage = if wiped { get_storages(&tx, address)? } else { Vec::new() }; + for (key, data) in StorageRevertsIter::new(storage, wiped_storage) { + tx.execute( + "INSERT INTO storage_revert (block_number, address, key, data) VALUES (?, ?, ?, ?) 
ON CONFLICT(block_number, address, key) DO UPDATE SET data = excluded.data", + (block.header.number.to_string(), address.to_string(), key.to_string(), data.to_string()), + )?; + } + } + + for (hash, bytecode) in changeset.contracts { + tx.execute( + "INSERT INTO bytecode (hash, data) VALUES (?, ?) ON CONFLICT(hash) DO NOTHING", + (hash.to_string(), bytecode.bytes().to_string()), + )?; + } + + tx.commit()?; + + Ok(()) + } + + /// Reverts the tip block from the database, checking it against the provided block number. + /// + /// The code is adapted from + pub fn revert_tip_block(&self, block_number: U256) -> eyre::Result<()> { + let mut connection = self.connection(); + let tx = connection.transaction()?; + + let tip_block_number = tx + .query_row::( + "SELECT number FROM block ORDER BY number DESC LIMIT 1", + [], + |row| row.get(0), + ) + .map(|data| U256::from_str(&data))??; + if block_number != tip_block_number { + eyre::bail!("Reverts can only be done from the tip. Attempted to revert block {} with tip block {}", block_number, tip_block_number); + } + + tx.execute("DELETE FROM block WHERE number = ?", (block_number.to_string(),))?; + + let mut state = BundleStateInit::new(); + let mut reverts = RevertsInit::new(); + + let account_reverts = tx + .prepare("SELECT address, data FROM account_revert WHERE block_number = ?")? + .query((block_number.to_string(),))? 
+ .mapped(|row| { + Ok(( + Address::from_str(row.get_ref(0)?.as_str()?), + serde_json::from_str::>(row.get_ref(1)?.as_str()?), + )) + }) + .map(|result| { + let (address, data) = result?; + Ok((address?, data?)) + }) + .collect::>>()?; + + for (address, old_info) in account_reverts { + // insert old info into reverts + reverts.entry(address).or_default().0 = Some(old_info.clone()); + + match state.entry(address) { + Entry::Vacant(entry) => { + let new_info = get_account(&tx, address)?; + entry.insert((old_info, new_info, HashMap::new())); + } + Entry::Occupied(mut entry) => { + // overwrite old account state + entry.get_mut().0 = old_info; + } + } + } + + let storage_reverts = tx + .prepare("SELECT address, key, data FROM storage_revert WHERE block_number = ?")? + .query((block_number.to_string(),))? + .mapped(|row| { + Ok(( + Address::from_str(row.get_ref(0)?.as_str()?), + B256::from_str(row.get_ref(1)?.as_str()?), + U256::from_str(row.get_ref(2)?.as_str()?), + )) + }) + .map(|result| { + let (address, key, data) = result?; + Ok((address?, key?, data?)) + }) + .collect::>>()?; + + for (address, key, old_data) in storage_reverts.into_iter().rev() { + let old_storage = StorageEntry { key, value: old_data }; + + // insert old info into reverts + reverts.entry(address).or_default().1.push(old_storage); + + // get account state or insert from plain state + let account_state = match state.entry(address) { + Entry::Vacant(entry) => { + let present_info = get_account(&tx, address)?; + entry.insert((present_info.clone(), present_info, HashMap::new())) + } + Entry::Occupied(entry) => entry.into_mut(), + }; + + // match storage + match account_state.2.entry(old_storage.key) { + Entry::Vacant(entry) => { + let new_value = get_storage(&tx, address, old_storage.key)?.unwrap_or_default(); + entry.insert((old_storage.value, new_value)); + } + Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; + } + }; + } + + // iterate over local plain state remove all 
account and all storages + for (address, (old_account, new_account, storage)) in state { + // revert account if needed + if old_account != new_account { + if let Some(account) = old_account { + upsert_account(&tx, address, |_| Ok(account))?; + } else { + delete_account(&tx, address)?; + } + } + + // revert storages + for (storage_key, (old_storage_value, _new_storage_value)) in storage { + // delete previous value + delete_storage(&tx, address, storage_key)?; + + // insert value if needed + if !old_storage_value.is_zero() { + upsert_storage(&tx, address, storage_key, old_storage_value)?; + } + } + } + + tx.commit()?; + + Ok(()) + } + + /// Get block by number. + pub fn get_block(&self, number: U256) -> eyre::Result> { + let block = self.connection().query_row::( + "SELECT data FROM block WHERE number = ?", + (number.to_string(),), + |row| row.get(0), + ); + match block { + Ok(data) => Ok(Some(serde_json::from_str(&data)?)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } + } + + /// Insert new account if it does not exist, update otherwise. The provided closure is called + /// with the current account, if it exists. + pub fn upsert_account( + &self, + address: Address, + f: impl FnOnce(Option) -> eyre::Result, + ) -> eyre::Result<()> { + upsert_account(&self.connection(), address, f) + } + + /// Get account by address. + pub fn get_account(&self, address: Address) -> eyre::Result> { + get_account(&self.connection(), address) + } +} + +/// Insert new account if it does not exist, update otherwise. The provided closure is called +/// with the current account, if it exists. Connection can be either +/// [rusqlite::Transaction] or [rusqlite::Connection]. 
+fn upsert_account( + connection: &Connection, + address: Address, + f: impl FnOnce(Option) -> eyre::Result, +) -> eyre::Result<()> { + let account = get_account(connection, address)?; + let account = f(account)?; + connection.execute( + "INSERT INTO account (address, data) VALUES (?, ?) ON CONFLICT(address) DO UPDATE SET data = excluded.data", + (address.to_string(), serde_json::to_string(&account)?), + )?; + + Ok(()) +} + +/// Delete account by address. Connection can be either [rusqlite::Transaction] or +/// [rusqlite::Connection]. +fn delete_account(connection: &Connection, address: Address) -> eyre::Result<()> { + connection.execute("DELETE FROM account WHERE address = ?", (address.to_string(),))?; + Ok(()) +} + +/// Get account by address using the database connection. Connection can be either +/// [rusqlite::Transaction] or [rusqlite::Connection]. +fn get_account(connection: &Connection, address: Address) -> eyre::Result> { + match connection.query_row::( + "SELECT data FROM account WHERE address = ?", + (address.to_string(),), + |row| row.get(0), + ) { + Ok(account_info) => Ok(Some(serde_json::from_str(&account_info)?)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } +} + +/// Insert new storage if it does not exist, update otherwise. Connection can be either +/// [rusqlite::Transaction] or [rusqlite::Connection]. +fn upsert_storage( + connection: &Connection, + address: Address, + key: B256, + data: U256, +) -> eyre::Result<()> { + connection.execute( + "INSERT INTO storage (address, key, data) VALUES (?, ?, ?) ON CONFLICT(address, key) DO UPDATE SET data = excluded.data", + (address.to_string(), key.to_string(), data.to_string()), + )?; + Ok(()) +} + +/// Delete storage by address and key. Connection can be either [rusqlite::Transaction] or +/// [rusqlite::Connection]. 
+fn delete_storage(connection: &Connection, address: Address, key: B256) -> eyre::Result<()> { + connection.execute( + "DELETE FROM storage WHERE address = ? AND key = ?", + (address.to_string(), key.to_string()), + )?; + Ok(()) +} + +/// Get all storages for the provided address using the database connection. Connection can be +/// either [rusqlite::Transaction] or [rusqlite::Connection]. +fn get_storages(connection: &Connection, address: Address) -> eyre::Result> { + connection + .prepare("SELECT key, data FROM storage WHERE address = ?")? + .query((address.to_string(),))? + .mapped(|row| { + Ok(( + B256::from_str(row.get_ref(0)?.as_str()?), + U256::from_str(row.get_ref(1)?.as_str()?), + )) + }) + .map(|result| { + let (key, data) = result?; + Ok((key?, data?)) + }) + .collect() +} + +/// Get storage for the provided address by key using the database connection. Connection can be +/// either [rusqlite::Transaction] or [rusqlite::Connection]. +fn get_storage(connection: &Connection, address: Address, key: B256) -> eyre::Result> { + match connection.query_row::( + "SELECT data FROM storage WHERE address = ? 
AND key = ?", + (address.to_string(), key.to_string()), + |row| row.get(0), + ) { + Ok(data) => Ok(Some(U256::from_str(&data)?)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } +} + +impl reth_revm::Database for Database { + type Error = eyre::Report; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + self.get_account(address) + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + let bytecode = self.connection().query_row::( + "SELECT data FROM bytecode WHERE hash = ?", + (code_hash.to_string(),), + |row| row.get(0), + ); + match bytecode { + Ok(data) => Ok(Bytecode::new_raw(Bytes::from_str(&data).unwrap())), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(Bytecode::default()), + Err(err) => Err(err.into()), + } + } + + fn storage(&mut self, address: Address, index: U256) -> Result { + get_storage(&self.connection(), address, index.into()).map(|data| data.unwrap_or_default()) + } + + fn block_hash(&mut self, number: U256) -> Result { + let block_hash = self.connection().query_row::( + "SELECT hash FROM block WHERE number = ?", + (number.to_string(),), + |row| row.get(0), + ); + match block_hash { + Ok(data) => Ok(B256::from_str(&data).unwrap()), + // No special handling for `QueryReturnedNoRows` is needed, because revm does block + // number bound checks on its own. + // See https://github.com/bluealloy/revm/blob/1ca3d39f6a9e9778f8eb0fcb74fe529345a531b4/crates/interpreter/src/instructions/host.rs#L106-L123. + Err(err) => Err(err.into()), + } + } +} diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs new file mode 100644 index 000000000..cd2b0c94d --- /dev/null +++ b/examples/exex/rollup/src/main.rs @@ -0,0 +1,586 @@ +//! Example of a simple rollup that derives its state from the L1 chain by executing transactions, +//! processing deposits and storing all related data in an SQLite database. +//! +//! 
The rollup contract accepts blocks of transactions and deposits of ETH and is deployed on +//! Holesky at [ROLLUP_CONTRACT_ADDRESS], see . + +use alloy_rlp::Decodable; +use alloy_sol_types::{sol, SolEventInterface, SolInterface}; +use db::Database; +use eyre::OptionExt; +use once_cell::sync::Lazy; +use reth_exex::{ExExContext, ExExEvent}; +use reth_interfaces::executor::BlockValidationError; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeComponents}; +use reth_node_ethereum::{EthEvmConfig, EthereumNode}; +use reth_primitives::{ + address, constants, + revm::env::fill_tx_env, + revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState}, + Address, Block, BlockWithSenders, Bytes, ChainSpec, ChainSpecBuilder, Genesis, Hardfork, + Header, Receipt, SealedBlockWithSenders, TransactionSigned, U256, +}; +use reth_provider::Chain; +use reth_revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + DatabaseCommit, StateBuilder, +}; +use reth_tracing::tracing::{debug, error, info}; +use rusqlite::Connection; +use std::sync::Arc; + +mod db; + +sol!(RollupContract, "rollup_abi.json"); +use RollupContract::{RollupContractCalls, RollupContractEvents}; + +const DATABASE_PATH: &str = "rollup.db"; +const ROLLUP_CONTRACT_ADDRESS: Address = address!("74ae65DF20cB0e3BF8c022051d0Cdd79cc60890C"); +const ROLLUP_SUBMITTER_ADDRESS: Address = address!("B01042Db06b04d3677564222010DF5Bd09C5A947"); +const CHAIN_ID: u64 = 17001; +static CHAIN_SPEC: Lazy> = Lazy::new(|| { + Arc::new( + ChainSpecBuilder::default() + .chain(CHAIN_ID.into()) + .genesis(Genesis::clique_genesis(CHAIN_ID, ROLLUP_SUBMITTER_ADDRESS)) + .shanghai_activated() + .build(), + ) +}); + +struct Rollup { + ctx: ExExContext, + db: Database, +} + +impl Rollup { + fn new(ctx: ExExContext, connection: Connection) -> eyre::Result { + let db = Database::new(connection)?; + Ok(Self { ctx, db }) + } + + async fn start(mut self) -> eyre::Result<()> { + // Process all new chain state 
notifications + while let Some(notification) = self.ctx.notifications.recv().await { + if let Some(reverted_chain) = notification.reverted_chain() { + self.revert(&reverted_chain)?; + } + + if let Some(committed_chain) = notification.committed_chain() { + self.commit(&committed_chain)?; + self.ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + } + + Ok(()) + } + + /// Process a new chain commit. + /// + /// This function decodes all transactions to the rollup contract into events, executes the + /// corresponding actions and inserts the results into the database. + fn commit(&mut self, chain: &Chain) -> eyre::Result<()> { + let events = decode_chain_into_rollup_events(chain); + + for (_, tx, event) in events { + match event { + // A new block is submitted to the rollup contract. + // The block is executed on top of existing rollup state and committed into the + // database. + RollupContractEvents::BlockSubmitted(_) => { + let call = RollupContractCalls::abi_decode(tx.input(), true)?; + + if let RollupContractCalls::submitBlock(RollupContract::submitBlockCall { + header, + blockData, + .. + }) = call + { + match execute_block(&mut self.db, &header, blockData) { + Ok((block, bundle, _, _)) => { + let block = block.seal_slow(); + self.db.insert_block_with_bundle(&block, bundle)?; + info!( + tx_hash = %tx.hash, + chain_id = %header.rollupChainId, + sequence = %header.sequence, + transactions = block.body.len(), + "Block submitted, executed and inserted into database" + ); + } + Err(err) => { + error!( + %err, + tx_hash = %tx.hash, + chain_id = %header.rollupChainId, + sequence = %header.sequence, + "Failed to execute block" + ); + } + } + } + } + // A deposit of ETH to the rollup contract. The deposit is added to the recipient's + // balance and committed into the database. 
+ RollupContractEvents::Enter(RollupContract::Enter { + token, + rollupRecipient, + amount, + }) => { + if token != Address::ZERO { + error!(tx_hash = %tx.hash, "Only ETH deposits are supported"); + continue + } + + self.db.upsert_account(rollupRecipient, |account| { + let mut account = account.unwrap_or_default(); + account.balance += amount; + Ok(account) + })?; + + info!( + tx_hash = %tx.hash, + %amount, + recipient = %rollupRecipient, + "Deposit", + ); + } + _ => (), + } + } + + Ok(()) + } + + /// Process a chain revert. + /// + /// This function decodes all transactions to the rollup contract into events, reverts the + /// corresponding actions and updates the database. + fn revert(&mut self, chain: &Chain) -> eyre::Result<()> { + let mut events = decode_chain_into_rollup_events(chain); + // Reverse the order of events to start reverting from the tip + events.reverse(); + + for (_, tx, event) in events { + match event { + // The block is reverted from the database. + RollupContractEvents::BlockSubmitted(_) => { + let call = RollupContractCalls::abi_decode(tx.input(), true)?; + + if let RollupContractCalls::submitBlock(RollupContract::submitBlockCall { + header, + .. + }) = call + { + self.db.revert_tip_block(header.sequence)?; + info!( + tx_hash = %tx.hash, + chain_id = %header.rollupChainId, + sequence = %header.sequence, + "Block reverted" + ); + } + } + // The deposit is subtracted from the recipient's balance. 
+ RollupContractEvents::Enter(RollupContract::Enter { + token, + rollupRecipient, + amount, + }) => { + if token != Address::ZERO { + error!(tx_hash = %tx.hash, "Only ETH deposits are supported"); + continue + } + + self.db.upsert_account(rollupRecipient, |account| { + let mut account = account.ok_or(eyre::eyre!("account not found"))?; + account.balance -= amount; + Ok(account) + })?; + + info!( + tx_hash = %tx.hash, + %amount, + recipient = %rollupRecipient, + "Deposit reverted", + ); + } + _ => (), + } + } + + Ok(()) + } +} + +/// Decode chain of blocks into a flattened list of receipt logs, filter only transactions to the +/// Rollup contract [ROLLUP_CONTRACT_ADDRESS] and extract [RollupContractEvents]. +fn decode_chain_into_rollup_events( + chain: &Chain, +) -> Vec<(&SealedBlockWithSenders, &TransactionSigned, RollupContractEvents)> { + chain + // Get all blocks and receipts + .blocks_and_receipts() + // Get all receipts + .flat_map(|(block, receipts)| { + block + .body + .iter() + .zip(receipts.iter().flatten()) + .map(move |(tx, receipt)| (block, tx, receipt)) + }) + // Filter only transactions to the rollup contract + .filter(|(_, tx, _)| tx.to() == Some(ROLLUP_CONTRACT_ADDRESS)) + // Get all logs + .flat_map(|(block, tx, receipt)| receipt.logs.iter().map(move |log| (block, tx, log))) + // Decode and filter rollup events + .filter_map(|(block, tx, log)| { + RollupContractEvents::decode_raw_log(log.topics(), &log.data.data, true) + .ok() + .map(|event| (block, tx, event)) + }) + .collect() +} + +/// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle +/// state)[BundleState] and list of (receipts)[Receipt]. 
+fn execute_block( + db: &mut Database, + header: &RollupContract::BlockHeader, + block_data: Bytes, +) -> eyre::Result<(BlockWithSenders, BundleState, Vec, Vec)> { + if header.rollupChainId != U256::from(CHAIN_ID) { + eyre::bail!("Invalid rollup chain ID") + } + + let block_number = u64::try_from(header.sequence)?; + let parent_block = if !header.sequence.is_zero() { + db.get_block(header.sequence - U256::from(1))? + } else { + None + }; + + // Calculate base fee per gas for EIP-1559 transactions + let base_fee_per_gas = if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) { + constants::EIP1559_INITIAL_BASE_FEE + } else { + parent_block + .as_ref() + .ok_or(eyre::eyre!("parent block not found"))? + .header + .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number)) + .ok_or(eyre::eyre!("failed to calculate base fee"))? + }; + + // Construct header + let header = Header { + parent_hash: parent_block.map(|block| block.header.hash()).unwrap_or_default(), + number: block_number, + gas_limit: u64::try_from(header.gasLimit)?, + timestamp: u64::try_from(header.confirmBy)?, + base_fee_per_gas: Some(base_fee_per_gas), + ..Default::default() + }; + + // Decode block data, filter only transactions with the correct chain ID and recover senders + let transactions = Vec::::decode(&mut block_data.as_ref())? + .into_iter() + .filter(|tx| tx.chain_id() == Some(CHAIN_ID)) + .map(|tx| { + let sender = tx.recover_signer().ok_or(eyre::eyre!("failed to recover signer"))?; + Ok((tx, sender)) + }) + .collect::>>()?; + + // Execute block + let state = StateBuilder::new_with_database( + Box::new(db) as Box + Send> + ) + .with_bundle_update() + .build(); + let mut evm = EthEvmConfig::default().evm(state); + + // Set state clear flag. 
+ evm.db_mut().set_state_clear_flag( + CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number), + ); + + let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id()); + EthEvmConfig::fill_cfg_and_block_env( + &mut cfg, + evm.block_mut(), + &CHAIN_SPEC, + &header, + U256::ZERO, + ); + *evm.cfg_mut() = cfg.cfg_env; + + let mut receipts = Vec::with_capacity(transactions.len()); + let mut executed_txs = Vec::with_capacity(transactions.len()); + let mut results = Vec::with_capacity(transactions.len()); + if !transactions.is_empty() { + let mut cumulative_gas_used = 0; + for (transaction, sender) in transactions { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas { + // TODO(alexey): what to do here? + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + // Execute transaction. + // Fill revm structure. + fill_tx_env(evm.tx_mut(), &transaction, sender); + + let ResultAndState { result, state } = match evm.transact() { + Ok(result) => result, + Err(err) => { + match err { + EVMError::Transaction(err) => { + // if the transaction is invalid, we can skip it + debug!(%err, ?transaction, "Skipping invalid transaction"); + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + eyre::bail!(err) + } + } + } + }; + + debug!(?transaction, ?result, ?state, "Executed transaction"); + + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. 
+ #[allow(clippy::needless_update)] // side-effect of optimism fields + receipts.push(Receipt { + tx_type: transaction.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.logs().iter().cloned().map(Into::into).collect(), + ..Default::default() + }); + + // append transaction to the list of executed transactions + executed_txs.push(transaction); + results.push(result); + } + + evm.db_mut().merge_transitions(BundleRetention::Reverts); + } + + // Construct block and recover senders + let block = Block { header, body: executed_txs, ..Default::default() } + .with_recovered_senders() + .ok_or_eyre("failed to recover senders")?; + + let bundle = evm.db_mut().take_bundle(); + + Ok((block, bundle, receipts, results)) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("Rollup", move |ctx| async { + let connection = Connection::open(DATABASE_PATH)?; + + Ok(Rollup::new(ctx, connection)?.start()) + }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} + +#[cfg(test)] +mod tests { + use std::time::{SystemTime, UNIX_EPOCH}; + + use alloy_sol_types::{sol, SolCall}; + use reth_interfaces::test_utils::generators::{self, sign_tx_with_key_pair}; + use reth_primitives::{ + bytes, + constants::ETH_TO_WEI, + public_key_to_address, + revm_primitives::{AccountInfo, ExecutionResult, Output, TransactTo, TxEnv}, + BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, + }; + use reth_revm::Evm; + use rusqlite::Connection; + use secp256k1::{Keypair, Secp256k1}; + + use crate::{ + db::Database, execute_block, RollupContract::BlockHeader, CHAIN_ID, + ROLLUP_SUBMITTER_ADDRESS, + }; + + sol!( + WETH, + r#" +[ + { + "constant":true, + "inputs":[ + { + "name":"", + "type":"address" + } + ], + "name":"balanceOf", + "outputs":[ + { + "name":"", + "type":"uint256" + } + ], + "payable":false, + 
"stateMutability":"view", + "type":"function" + } +] + "# + ); + + #[test] + fn test_execute_block() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let mut database = Database::new(Connection::open_in_memory()?)?; + + // Create key pair + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut generators::rng()); + let sender_address = public_key_to_address(key_pair.public_key()); + + // Deposit some ETH to the sender and insert it into database + database.upsert_account(sender_address, |_| { + Ok(AccountInfo { balance: U256::from(ETH_TO_WEI), nonce: 1, ..Default::default() }) + })?; + + // WETH deployment transaction + let (_, _, results) = execute_transaction( + &mut database, + key_pair, + 0, + Transaction::Eip2930(TxEip2930 { + chain_id: CHAIN_ID, + nonce: 1, + gas_limit: 1_500_000, + gas_price: 1_500_000_000, + to: TxKind::Create, + // WETH9 bytecode + input: bytes!("60606040526040805190810160405280600d81526020017f57726170706564204574686572000000000000000000000000000000000000008152506000908051906020019061004f9291906100c8565b506040805190810160405280600481526020017f57455448000000000000000000000000000000000000000000000000000000008152506001908051906020019061009b9291906100c8565b506012600260006101000a81548160ff021916908360ff16021790555034156100c357600080fd5b61016d565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061010957805160ff1916838001178555610137565b82800160010185558215610137579182015b8281111561013657825182559160200191906001019061011b565b5b5090506101449190610148565b5090565b61016a91905b8082111561016657600081600090555060010161014e565b5090565b90565b610c348061017c6000396000f3006060604052600436106100af576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100b9578063095ea7b31461014757806318160ddd146101a157806323b872dd146101ca5780632e1a7d4d14610243578063313ce5671461026657806370a082311461029557806395d89b41146102e2578063a9059cbb14610370578063d0e30db0146103
ca578063dd62ed3e146103d4575b6100b7610440565b005b34156100c457600080fd5b6100cc6104dd565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561010c5780820151818401526020810190506100f1565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610187600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061057b565b604051808215151515815260200191505060405180910390f35b34156101ac57600080fd5b6101b461066d565b6040518082815260200191505060405180910390f35b34156101d557600080fd5b610229600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061068c565b604051808215151515815260200191505060405180910390f35b341561024e57600080fd5b61026460048080359060200190919050506109d9565b005b341561027157600080fd5b610279610b05565b604051808260ff1660ff16815260200191505060405180910390f35b34156102a057600080fd5b6102cc600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610b18565b6040518082815260200191505060405180910390f35b34156102ed57600080fd5b6102f5610b30565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561033557808201518184015260208101905061031a565b50505050905090810190601f1680156103625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037b57600080fd5b6103b0600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610bce565b604051808215151515815260200191505060405180910390f35b6103d2610440565b005b34156103df57600080fd5b61042a600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610be3565b6040518082815260200191505060405180910390f35b34600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050
819055503373ffffffffffffffffffffffffffffffffffffffff167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c346040518082815260200191505060405180910390a2565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105735780601f1061054857610100808354040283529160200191610573565b820191906000526020600020905b81548152906001019060200180831161055657829003601f168201915b505050505081565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1631905090565b600081600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156106dc57600080fd5b3373ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141580156107b457507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b156108cf5781600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561084457600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffff
ffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055505b81600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610a2757600080fd5b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f193505050501515610ab457600080fd5b3373ffffffffffffffffffffffffffffffffffffffff167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65826040518082815260200191505060405180910390a250565b600260009054906101000a900460ff1681565b60036020528060005260406000206000915090505481565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610bc65780601f10610b9b57610100808354040283529160200191610bc6565b820191906000526020600020905b815481529060010190602001808311610ba957829003601f168201915b505050505081565b6000610bdb33848461068c565b905092915050565b60046020528160005260406000206020528060005260406000206000915091505054815600a165627a7a72305820deb4c2ccab3c2fdca32ab3f46728389c2fe2c165d5fafa07661e4e004f6c344a0029"), + ..Default::default() + }) + )?; + + let weth_address = match 
results.first() { + Some(ExecutionResult::Success { output: Output::Create(_, Some(address)), .. }) => { + *address + } + _ => eyre::bail!("WETH contract address not found"), + }; + + // WETH deposit transaction + execute_transaction( + &mut database, + key_pair, + 1, + Transaction::Eip2930(TxEip2930 { + chain_id: CHAIN_ID, + nonce: 2, + gas_limit: 50000, + gas_price: 1_500_000_000, + to: TxKind::Call(weth_address), + value: U256::from(0.5 * ETH_TO_WEI as f64), + input: bytes!("d0e30db0"), + ..Default::default() + }), + )?; + + // Verify WETH balance + let mut evm = Evm::builder() + .with_db(&mut database) + .with_tx_env(TxEnv { + caller: sender_address, + gas_limit: 50_000_000, + transact_to: TransactTo::Call(weth_address), + data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), + ..Default::default() + }) + .build(); + let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; + assert_eq!( + result.output(), + Some(&U256::from(0.5 * ETH_TO_WEI as f64).to_be_bytes_vec().into()) + ); + drop(evm); + + // Verify nonce + let account = database.get_account(sender_address)?.unwrap(); + assert_eq!(account.nonce, 3); + + // Revert block with WETH deposit transaction + database.revert_tip_block(U256::from(1))?; + + // Verify WETH balance after revert + let mut evm = Evm::builder() + .with_db(&mut database) + .with_tx_env(TxEnv { + caller: sender_address, + gas_limit: 50_000_000, + transact_to: TransactTo::Call(weth_address), + data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), + ..Default::default() + }) + .build(); + let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; + assert_eq!(result.output(), Some(&U256::ZERO.to_be_bytes_vec().into())); + drop(evm); + + // Verify nonce after revert + let account = database.get_account(sender_address)?.unwrap(); + assert_eq!(account.nonce, 2); + + Ok(()) + } + + fn execute_transaction( + database: &mut Database, + key_pair: Keypair, + sequence: BlockNumber, + tx: 
Transaction, + ) -> eyre::Result<(SealedBlockWithSenders, Vec, Vec)> { + let signed_tx = sign_tx_with_key_pair(key_pair, tx); + + // Construct block header and data + let block_header = BlockHeader { + rollupChainId: U256::from(CHAIN_ID), + sequence: U256::from(sequence), + confirmBy: U256::from(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()), + gasLimit: U256::from(30_000_000), + rewardAddress: ROLLUP_SUBMITTER_ADDRESS, + }; + let block_data = alloy_rlp::encode(vec![signed_tx.envelope_encoded()]); + + // Execute block and insert into database + let (block, bundle, receipts, results) = + execute_block(database, &block_header, block_data.into())?; + let block = block.seal_slow(); + database.insert_block_with_bundle(&block, bundle)?; + + Ok((block, receipts, results)) + } +} From a6661d695324b17e4bb59d7af1e343953fbadcab Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 30 Apr 2024 13:19:39 +0100 Subject: [PATCH 127/250] chore(exex): display `exex_id` log field using `Display` (#7994) --- crates/exex/src/manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 81e523718..1037395b8 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -331,7 +331,7 @@ impl Future for ExExManager { // handle incoming exex events for exex in self.exex_handles.iter_mut() { while let Poll::Ready(Some(event)) = exex.receiver.poll_recv(cx) { - debug!(exex_id = exex.id, ?event, "Received event from exex"); + debug!(exex_id = %exex.id, ?event, "Received event from exex"); exex.metrics.events_sent_total.increment(1); match event { ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height), From 996f1efb731473d7c4c0fe3b0b5e96777a26e376 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 30 Apr 2024 13:25:25 +0100 Subject: [PATCH 128/250] docs(storage): chain of blocks should not be empty (#7809) Co-authored-by: Oliver Nordbjerg --- 
crates/storage/provider/src/chain.rs | 35 +++++++++++++++++++++------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 5acd84599..a596d93ea 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -16,6 +16,10 @@ use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive}; /// changesets for those blocks (and their transactions), as well as the blocks themselves. /// /// Used inside the BlockchainTree. +/// +/// # Warning +/// +/// A chain of blocks should not be empty. #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Chain { /// All blocks in this chain. @@ -33,16 +37,19 @@ pub struct Chain { impl Chain { /// Create new Chain from blocks and state. + /// + /// # Warning + /// + /// A chain of blocks should not be empty. pub fn new( blocks: impl IntoIterator, state: BundleStateWithReceipts, trie_updates: Option, ) -> Self { - Self { - blocks: BTreeMap::from_iter(blocks.into_iter().map(|b| (b.number, b))), - state, - trie_updates, - } + let blocks = BTreeMap::from_iter(blocks.into_iter().map(|b| (b.number, b))); + debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); + + Self { blocks, state, trie_updates } } /// Create new Chain from a single block and its state. @@ -158,16 +165,20 @@ impl Chain { } /// Get the first block in this chain. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. #[track_caller] pub fn first(&self) -> &SealedBlockWithSenders { - self.blocks.first_key_value().expect("Chain has at least one block for first").1 + self.blocks.first_key_value().expect("Chain should have at least one block").1 } /// Get the tip of the chain. /// - /// # Note + /// # Panics /// - /// Chains always have at least one block. + /// If chain doesn't have any blocks. 
#[track_caller] pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 @@ -179,6 +190,10 @@ impl Chain { } /// Returns the range of block numbers in the chain. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. pub fn range(&self) -> RangeInclusive { self.first().number..=self.tip().number } @@ -255,6 +270,10 @@ impl Chain { /// The second chain only contains the changes that were reverted on the first chain; however, /// it retains the up to date state as if the chains were one, i.e. the second chain is an /// extension of the first. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. #[track_caller] pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key(); From d03150e13c170725bea9045ff985372eeadcb796 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 14:31:16 +0200 Subject: [PATCH 129/250] chore: rm more unused deps (#7995) --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e7bf2ec5c..ab330d87e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -333,7 +333,6 @@ parking_lot = "0.12" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation metrics = "0.21.1" modular-bitfield = "0.11.2" -hex-literal = "0.4" once_cell = "1.17" syn = "2.0" nybbles = "0.2.1" From 29e0e8150c9b979b2672ad257a628a82ef31e68c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 30 Apr 2024 15:05:57 +0200 Subject: [PATCH 130/250] feat: add with_canon_state_notification_sender fn (#7997) --- crates/blockchain-tree/src/blockchain_tree.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index b98cc664a..eee4163c7 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs 
@@ -156,6 +156,18 @@ where }) } + /// Replaces the canon state notification sender. + /// + /// Caution: this will close any existing subscriptions to the previous sender. + #[doc(hidden)] + pub fn with_canon_state_notification_sender( + mut self, + canon_state_notification_sender: CanonStateNotificationSender, + ) -> Self { + self.canon_state_notification_sender = canon_state_notification_sender; + self + } + /// Set the sync metric events sender. /// /// A transmitter for sending synchronization metrics. This is used for monitoring the node's From b99d367c1a6c4e5c339eb5e5fc16a2ad77f099e7 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 16:17:31 +0200 Subject: [PATCH 131/250] fix: ensure peer exists b4 marking txs as received (#7998) --- crates/net/network/src/transactions/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index f7d03520f..ee14e4c82 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -945,14 +945,13 @@ where return } + let Some(peer) = self.peers.get_mut(&peer_id) else { return }; let mut transactions = transactions.0; // mark the transactions as received self.transaction_fetcher .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.hash())); - let Some(peer) = self.peers.get_mut(&peer_id) else { return }; - // track that the peer knows these transaction, but only if this is a new broadcast. 
// If we received the transactions as the response to our `GetPooledTransactions`` // requests (based on received `NewPooledTransactionHashes`) then we already From d0973bb6dcc434271bfb83fd6f3dd0bd3bff9d6f Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 16:23:32 +0200 Subject: [PATCH 132/250] fix: correct expiration for discv4 lookup requests (#7996) --- crates/net/discv4/src/config.rs | 6 ++++++ crates/net/discv4/src/lib.rs | 5 +++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index 8da6db4b7..c9007a910 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -197,6 +197,12 @@ impl Discv4ConfigBuilder { self } + /// Sets the expiration duration for lookup neighbor requests + pub fn lookup_neighbours_expiration(&mut self, duration: Duration) -> &mut Self { + self.config.neighbours_expiration = duration; + self + } + /// Sets the expiration duration for a bond with a peer pub fn bond_expiration(&mut self, duration: Duration) -> &mut Self { self.config.bond_expiration = duration; diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 061e4a33b..ddc9564c2 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -1421,7 +1421,7 @@ impl Discv4Service { let mut failed_lookups = Vec::new(); self.pending_lookup.retain(|node_id, (lookup_sent_at, _)| { - if now.duration_since(*lookup_sent_at) > self.config.ping_expiration { + if now.duration_since(*lookup_sent_at) > self.config.request_timeout { failed_lookups.push(*node_id); return false } @@ -1441,7 +1441,7 @@ impl Discv4Service { fn evict_failed_neighbours(&mut self, now: Instant) { let mut failed_neighbours = Vec::new(); self.pending_find_nodes.retain(|node_id, find_node_request| { - if now.duration_since(find_node_request.sent_at) > self.config.request_timeout { + if now.duration_since(find_node_request.sent_at) > 
self.config.neighbours_expiration { if !find_node_request.answered { // node actually responded but with fewer entries than expected, but we don't // treat this as an hard error since it responded. @@ -2549,6 +2549,7 @@ mod tests { let config = Discv4Config::builder() .request_timeout(Duration::from_millis(200)) .ping_expiration(Duration::from_millis(200)) + .lookup_neighbours_expiration(Duration::from_millis(200)) .add_eip868_pair("eth", fork_id) .build(); let (_disv4, mut service) = create_discv4_with_config(config).await; From 6d7cd53ad25f0b79c89fd60a4db2a0f2fe097efe Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 16:30:00 +0200 Subject: [PATCH 133/250] chore(discv4): limit number of queued pings (#7999) Co-authored-by: Matthias Seitz --- crates/net/discv4/src/lib.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index ddc9564c2..b6fb97827 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -119,10 +119,21 @@ const MIN_PACKET_SIZE: usize = 32 + 65 + 1; /// Concurrency factor for `FindNode` requests to pick `ALPHA` closest nodes, const ALPHA: usize = 3; -/// Maximum number of nodes to ping at concurrently. 2 full `Neighbours` responses with 16 _new_ -/// nodes. This will apply some backpressure in recursive lookups. +/// Maximum number of nodes to ping at concurrently. +/// +/// This corresponds to 2 full `Neighbours` responses with 16 _new_ nodes. This will apply some +/// backpressure in recursive lookups. const MAX_NODES_PING: usize = 2 * MAX_NODES_PER_BUCKET; +/// Maximum number of pings to keep queued. +/// +/// If we are currently sending too many pings, any new pings will be queued. To prevent unbounded +/// growth of the queue, the queue has a maximum capacity, after which any additional pings will be +/// discarded. +/// +/// This corresponds to 2 full `Neighbours` responses with 16 new nodes. 
+const MAX_QUEUED_PINGS: usize = 2 * MAX_NODES_PER_BUCKET; + /// The size of the datagram is limited [`MAX_PACKET_SIZE`], 16 nodes, as the discv4 specifies don't /// fit in one datagram. The safe number of nodes that always fit in a datagram is 12, with worst /// case all of them being IPv6 nodes. This is calculated by `(MAX_PACKET_SIZE - (header + expire + @@ -570,7 +581,7 @@ impl Discv4Service { _tasks: tasks, ingress: ingress_rx, egress: egress_tx, - queued_pings: Default::default(), + queued_pings: VecDeque::with_capacity(MAX_QUEUED_PINGS), pending_pings: Default::default(), pending_lookup: Default::default(), pending_find_nodes: Default::default(), @@ -1131,7 +1142,7 @@ impl Discv4Service { if self.pending_pings.len() < MAX_NODES_PING { self.send_ping(node, reason); - } else { + } else if self.queued_pings.len() < MAX_QUEUED_PINGS { self.queued_pings.push_back((node, reason)); } } From e158542d31bf576e8a6b6e61337b62f9839734cf Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Tue, 30 Apr 2024 17:33:30 +0200 Subject: [PATCH 134/250] feat: ensure offset size is at most 8 bytes (#8000) --- crates/storage/nippy-jar/src/error.rs | 5 +++++ crates/storage/nippy-jar/src/lib.rs | 17 +++++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index c769f0db8..3763be3dc 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -37,6 +37,11 @@ pub enum NippyJarError { PHFMissing, #[error("nippy jar was built without an index")] UnsupportedFilterQuery, + #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] + OffsetSizeTooBig { + /// The read offset size in number of bytes. 
+ offset_size: u64, + }, #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, #[error("dictionary is not loaded.")] diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index cc4f2b0f5..59fc586e4 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -486,14 +486,15 @@ impl DataReader { // SAFETY: File is read-only and its descriptor is kept alive as long as the mmap handle. let offset_mmap = unsafe { Mmap::map(&offset_file)? }; - Ok(Self { - data_file, - data_mmap, - offset_file, - // First byte is the size of one offset in bytes - offset_size: offset_mmap[0] as u64, - offset_mmap, - }) + // First byte is the size of one offset in bytes + let offset_size = offset_mmap[0] as u64; + + // Ensure that the size of an offset is at most 8 bytes. + if offset_size > 8 { + return Err(NippyJarError::OffsetSizeTooBig { offset_size }) + } + + Ok(Self { data_file, data_mmap, offset_file, offset_size, offset_mmap }) } /// Returns the offset for the requested data index From 9153d8848f56e8651c0becd95e2e0e565132b1da Mon Sep 17 00:00:00 2001 From: Abner Zheng Date: Wed, 1 May 2024 00:14:25 +0800 Subject: [PATCH 135/250] chore: remove unnecessary Debug implmentation (#8001) --- crates/rpc/rpc-builder/src/auth.rs | 14 +------------- crates/rpc/rpc-builder/src/lib.rs | 14 ++------------ 2 files changed, 3 insertions(+), 25 deletions(-) diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 372617257..72345aca6 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -34,7 +34,6 @@ use reth_rpc_api::servers::*; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use reth_transaction_pool::TransactionPool; use std::{ - fmt, net::{IpAddr, Ipv4Addr, SocketAddr}, time::{Duration, SystemTime, UNIX_EPOCH}, }; @@ -218,6 +217,7 @@ impl AuthServerConfig { } /// Builder type for configuring an 
`AuthServerConfig`. +#[derive(Debug)] pub struct AuthServerConfigBuilder { socket_addr: Option, secret: JwtSecret, @@ -226,18 +226,6 @@ pub struct AuthServerConfigBuilder { ipc_endpoint: Option, } -impl fmt::Debug for AuthServerConfigBuilder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AuthServerConfig") - .field("socket_addr", &self.socket_addr) - .field("secret", &self.secret) - .field("server_config", &self.server_config) - .field("ipc_server_config", &self.ipc_server_config) - .field("ipc_endpoint", &self.ipc_endpoint) - .finish() - } -} - // === impl AuthServerConfigBuilder === impl AuthServerConfigBuilder { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 4bd367060..cea80398e 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -2113,7 +2113,7 @@ impl fmt::Debug for RpcServer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RpcServer") .field("http", &self.ws_http.http_local_addr.is_some()) - .field("ws", &self.ws_http.http_local_addr.is_some()) + .field("ws", &self.ws_http.ws_local_addr.is_some()) .field("ipc", &self.ipc.is_some()) .finish() } @@ -2122,7 +2122,7 @@ impl fmt::Debug for RpcServer { /// A handle to the spawned servers. /// /// When this type is dropped or [RpcServerHandle::stop] has been called the server will be stopped. 
-#[derive(Clone)] +#[derive(Clone, Debug)] #[must_use = "Server stops if dropped"] pub struct RpcServerHandle { /// The address of the http/ws server @@ -2225,16 +2225,6 @@ impl RpcServerHandle { } } -impl fmt::Debug for RpcServerHandle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RpcServerHandle") - .field("http", &self.http.is_some()) - .field("ws", &self.ws.is_some()) - .field("ipc", &self.ipc.is_some()) - .finish() - } -} - #[cfg(test)] mod tests { use super::*; From 581682605cc86d2c3eeaa69ab829340422c8f93b Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 30 Apr 2024 18:38:52 +0200 Subject: [PATCH 136/250] fix(net, discv4): call find_node with valid endpoint (#8002) --- crates/net/discv4/src/lib.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index b6fb97827..1a942a5b9 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -1387,7 +1387,16 @@ impl Discv4Service { BucketEntry::SelfEntry => { // we received our own node entry } - _ => self.find_node(&closest, ctx.clone()), + BucketEntry::Present(mut entry, _) => { + if entry.value_mut().has_endpoint_proof { + self.find_node(&closest, ctx.clone()); + } + } + BucketEntry::Pending(mut entry, _) => { + if entry.value().has_endpoint_proof { + self.find_node(&closest, ctx.clone()); + } + } } } } From d04d9556fa4954340e0c2518cc93f095c36631ff Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 30 Apr 2024 18:56:34 +0200 Subject: [PATCH 137/250] feat: make nodetypes stateless and move evm to components (#7992) --- crates/blockchain-tree/src/noop.rs | 9 + crates/evm/src/lib.rs | 2 +- crates/node-ethereum/src/node.rs | 44 +++-- crates/node-ethereum/tests/it/builder.rs | 2 +- crates/node-ethereum/tests/it/exex.rs | 2 +- crates/node/api/src/node.rs | 38 ++-- crates/node/builder/src/builder/mod.rs | 41 ++--- crates/node/builder/src/builder/states.rs | 23 +-- 
crates/node/builder/src/components/builder.rs | 166 ++++++++++++++---- crates/node/builder/src/components/execute.rs | 34 ++++ crates/node/builder/src/components/mod.rs | 27 ++- crates/node/builder/src/launch/mod.rs | 64 ++++--- crates/node/builder/src/rpc.rs | 2 +- crates/optimism/node/src/node.rs | 62 +++++-- crates/optimism/node/tests/it/builder.rs | 2 +- crates/storage/provider/src/providers/mod.rs | 7 + examples/custom-engine-types/src/main.rs | 13 +- examples/custom-evm/src/main.rs | 29 +-- examples/custom-node-components/src/main.rs | 2 +- examples/custom-payload-builder/src/main.rs | 2 +- 20 files changed, 401 insertions(+), 170 deletions(-) create mode 100644 crates/node/builder/src/components/execute.rs diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 9fa820255..bb99f9b55 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -27,6 +27,15 @@ pub struct NoopBlockchainTree { pub canon_state_notification_sender: Option, } +impl NoopBlockchainTree { + /// Create a new NoopBlockchainTree with a canon state notification sender. + pub fn with_canon_state_notifications( + canon_state_notification_sender: CanonStateNotificationSender, + ) -> Self { + Self { canon_state_notification_sender: Some(canon_state_notification_sender) } + } +} + impl BlockchainTreeEngine for NoopBlockchainTree { fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { Ok(()) diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 78a76e54c..9179abc33 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -80,7 +80,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This represents the set of methods used to configure the EVM's environment before block /// execution. 
-pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone { +pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The type of the transaction metadata that should be used to fill fields in the transaction /// environment. /// diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 5a1a03554..4f52027b4 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -4,7 +4,9 @@ use crate::{EthEngineTypes, EthEvmConfig}; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_network::NetworkHandle; use reth_node_builder::{ - components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + components::{ + ComponentsBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + }, node::{FullNodeTypes, NodeTypes}, BuilderContext, Node, PayloadBuilderConfig, }; @@ -23,8 +25,13 @@ pub struct EthereumNode; impl EthereumNode { /// Returns a [ComponentsBuilder] configured for a regular Ethereum node. 
- pub fn components( - ) -> ComponentsBuilder + pub fn components() -> ComponentsBuilder< + Node, + EthereumPoolBuilder, + EthereumPayloadBuilder, + EthereumNetworkBuilder, + EthereumExecutorBuilder, + > where Node: FullNodeTypes, { @@ -33,31 +40,48 @@ impl EthereumNode { .pool(EthereumPoolBuilder::default()) .payload(EthereumPayloadBuilder::default()) .network(EthereumNetworkBuilder::default()) + .executor(EthereumExecutorBuilder::default()) } } impl NodeTypes for EthereumNode { type Primitives = (); type Engine = EthEngineTypes; - type Evm = EthEvmConfig; - - fn evm_config(&self) -> Self::Evm { - EthEvmConfig::default() - } } impl Node for EthereumNode where N: FullNodeTypes, { - type ComponentsBuilder = - ComponentsBuilder; + type ComponentsBuilder = ComponentsBuilder< + N, + EthereumPoolBuilder, + EthereumPayloadBuilder, + EthereumNetworkBuilder, + EthereumExecutorBuilder, + >; fn components_builder(self) -> Self::ComponentsBuilder { Self::components() } } +/// A regular ethereum evm and executor builder. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct EthereumExecutorBuilder; + +impl ExecutorBuilder for EthereumExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = EthEvmConfig; + + async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(EthEvmConfig::default()) + } +} + /// A basic ethereum transaction pool. 
/// /// This contains various settings that can be configured and take precedence over the node's diff --git a/crates/node-ethereum/tests/it/builder.rs b/crates/node-ethereum/tests/it/builder.rs index 1f4579236..b48e58679 100644 --- a/crates/node-ethereum/tests/it/builder.rs +++ b/crates/node-ethereum/tests/it/builder.rs @@ -13,7 +13,7 @@ fn test_basic_setup() { let msg = "On components".to_string(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(EthereumNode::default()) + .with_types::() .with_components(EthereumNode::components()) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); diff --git a/crates/node-ethereum/tests/it/exex.rs b/crates/node-ethereum/tests/it/exex.rs index bbab6d9dc..80366ba23 100644 --- a/crates/node-ethereum/tests/it/exex.rs +++ b/crates/node-ethereum/tests/it/exex.rs @@ -31,7 +31,7 @@ fn basic_exex() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(EthereumNode::default()) + .with_types::() .with_components(EthereumNode::components()) .install_exex("dummy", move |ctx| future::ok(DummyExEx { _ctx: ctx })) .check_launch(); diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 2eb14011f..0a76f7504 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,4 +1,4 @@ -//! Traits for configuring a node +//! Traits for configuring a node. use crate::{primitives::NodePrimitives, ConfigureEvm, EngineTypes}; use reth_db::{ @@ -15,21 +15,20 @@ use std::marker::PhantomData; /// The type that configures the essential types of an ethereum like node. /// /// This includes the primitive types of a node, the engine API types for communication with the -/// consensus layer, and the EVM configuration type for setting up the Ethereum Virtual Machine. +/// consensus layer. +/// +/// This trait is intended to be stateless and only define the types of the node. 
pub trait NodeTypes: Send + Sync + 'static { /// The node's primitive types, defining basic operations and structures. type Primitives: NodePrimitives; /// The node's engine types, defining the interaction with the consensus engine. type Engine: EngineTypes; - /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm; - - /// Returns the node's evm config. - fn evm_config(&self) -> Self::Evm; } /// A helper trait that is downstream of the [NodeTypes] trait and adds stateful components to the /// node. +/// +/// Its types are configured by node internally and are not intended to be user configurable. pub trait FullNodeTypes: NodeTypes + 'static { /// Underlying database type used by the node to store and retrieve data. type DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static; @@ -41,7 +40,7 @@ pub trait FullNodeTypes: NodeTypes + 'static { #[derive(Debug)] pub struct FullNodeTypesAdapter { /// An instance of the user configured node types. - pub types: Types, + pub types: PhantomData, /// The database type used by the node. pub db: PhantomData, /// The provider type used by the node. @@ -49,9 +48,15 @@ pub struct FullNodeTypesAdapter { } impl FullNodeTypesAdapter { - /// Create a new adapter from the given node types. - pub fn new(types: Types) -> Self { - Self { types, db: Default::default(), provider: Default::default() } + /// Create a new adapter with the configured types. 
+ pub fn new() -> Self { + Self { types: Default::default(), db: Default::default(), provider: Default::default() } + } +} + +impl Default for FullNodeTypesAdapter { + fn default() -> Self { + Self::new() } } @@ -63,11 +68,6 @@ where { type Primitives = Types::Primitives; type Engine = Types::Engine; - type Evm = Types::Evm; - - fn evm_config(&self) -> Self::Evm { - self.types.evm_config() - } } impl FullNodeTypes for FullNodeTypesAdapter @@ -85,9 +85,15 @@ pub trait FullNodeComponents: FullNodeTypes + 'static { /// The transaction pool of the node. type Pool: TransactionPool + Unpin; + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + type Evm: ConfigureEvm; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; + /// Returns the node's evm config. + fn evm_config(&self) -> &Self::Evm; + /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 7f898ca21..815b13858 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -50,10 +50,10 @@ pub type RethFullAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter { @@ -187,12 +188,11 @@ where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { /// Configures the types of the node. - pub fn with_types(self, types: T) -> NodeBuilderWithTypes> + pub fn with_types(self) -> NodeBuilderWithTypes> where T: NodeTypes, { - let types = FullNodeTypesAdapter::new(types); - NodeBuilderWithTypes::new(self.config, types, self.database) + NodeBuilderWithTypes::new(self.config, self.database) } /// Preconfigures the node with a specific node implementation. 
@@ -205,7 +205,7 @@ where where N: Node>, { - self.with_types(node.clone()).with_components(node.components_builder()) + self.with_types().with_components(node.components_builder()) } } @@ -236,15 +236,12 @@ where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { /// Configures the types of the node. - pub fn with_types( - self, - types: T, - ) -> WithLaunchContext>> + pub fn with_types(self) -> WithLaunchContext>> where T: NodeTypes, { WithLaunchContext { - builder: self.builder.with_types(types), + builder: self.builder.with_types(), task_executor: self.task_executor, data_dir: self.data_dir, } @@ -260,7 +257,7 @@ where where N: Node>, { - self.with_types(node.clone()).with_components(node.components_builder()) + self.with_types().with_components(node.components_builder()) } /// Launches a preconfigured [Node] @@ -428,8 +425,6 @@ pub struct BuilderContext { pub(crate) config: NodeConfig, /// loaded config pub(crate) reth_config: reth_config::Config, - /// EVM config of the node - pub(crate) evm_config: Node::Evm, } impl BuilderContext { @@ -441,9 +436,8 @@ impl BuilderContext { data_dir: ChainPath, config: NodeConfig, reth_config: reth_config::Config, - evm_config: Node::Evm, ) -> Self { - Self { head, provider, executor, data_dir, config, reth_config, evm_config } + Self { head, provider, executor, data_dir, config, reth_config } } /// Returns the configured provider to interact with the blockchain. @@ -451,11 +445,6 @@ impl BuilderContext { &self.provider } - /// Returns the configured evm. - pub fn evm_config(&self) -> &Node::Evm { - &self.evm_config - } - /// Returns the current head of the blockchain at launch. 
pub fn head(&self) -> Head { self.head diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index b77588df4..753978de1 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -31,8 +31,8 @@ pub struct NodeBuilderWithTypes { impl NodeBuilderWithTypes { /// Creates a new instance of the node builder with the given configuration and types. - pub fn new(config: NodeConfig, types: T, database: T::DB) -> Self { - Self { config, adapter: NodeTypesAdapter::new(types, database) } + pub fn new(config: NodeConfig, database: T::DB) -> Self { + Self { config, adapter: NodeTypesAdapter::new(database) } } /// Advances the state of the node builder to the next state where all components are configured @@ -59,14 +59,12 @@ impl NodeBuilderWithTypes { pub(crate) struct NodeTypesAdapter { /// The database type used by the node. pub(crate) database: T::DB, - // TODO(mattsse): make this stateless - pub(crate) types: T, } impl NodeTypesAdapter { /// Create a new adapter from the given node types. - pub(crate) fn new(types: T, database: T::DB) -> Self { - Self { types, database } + pub(crate) fn new(database: T::DB) -> Self { + Self { database } } } @@ -85,18 +83,11 @@ pub struct NodeAdapter> { pub task_executor: TaskExecutor, /// The provider of the node. 
pub provider: T::Provider, - /// EVM config - pub evm: T::Evm, } impl> NodeTypes for NodeAdapter { type Primitives = T::Primitives; type Engine = T::Engine; - type Evm = T::Evm; - - fn evm_config(&self) -> Self::Evm { - self.evm.clone() - } } impl> FullNodeTypes for NodeAdapter { @@ -106,11 +97,16 @@ impl> FullNodeTypes for NodeAdapter impl> FullNodeComponents for NodeAdapter { type Pool = C::Pool; + type Evm = C::Evm; fn pool(&self) -> &Self::Pool { self.components.pool() } + fn evm_config(&self) -> &Self::Evm { + self.components.evm_config() + } + fn provider(&self) -> &Self::Provider { &self.provider } @@ -134,7 +130,6 @@ impl> Clone for NodeAdapter { components: self.components.clone(), task_executor: self.task_executor.clone(), provider: self.provider.clone(), - evm: self.evm.clone(), } } } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 1c963f024..d17cdc8ee 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -1,13 +1,16 @@ //! A generic [NodeComponentsBuilder] use crate::{ - components::{Components, NetworkBuilder, NodeComponents, PayloadServiceBuilder, PoolBuilder}, - BuilderContext, FullNodeTypes, + components::{ + Components, ExecutorBuilder, NetworkBuilder, NodeComponents, PayloadServiceBuilder, + PoolBuilder, + }, + BuilderContext, ConfigureEvm, FullNodeTypes, }; use reth_transaction_pool::TransactionPool; use std::{future::Future, marker::PhantomData}; -/// A generic, customizable [`NodeComponentsBuilder`]. +/// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. /// /// This type is stateful and captures the configuration of the node's components. /// @@ -27,21 +30,31 @@ use std::{future::Future, marker::PhantomData}; /// All component builders are captured in the builder state and will be consumed once the node is /// launched. 
#[derive(Debug)] -pub struct ComponentsBuilder { +pub struct ComponentsBuilder { pool_builder: PoolB, payload_builder: PayloadB, network_builder: NetworkB, + executor_builder: ExecB, _marker: PhantomData, } -impl ComponentsBuilder { +impl + ComponentsBuilder +{ /// Configures the node types. - pub fn node_types(self) -> ComponentsBuilder + pub fn node_types(self) -> ComponentsBuilder where Types: FullNodeTypes, { - let Self { pool_builder, payload_builder, network_builder, _marker } = self; + let Self { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; ComponentsBuilder { + executor_builder: evm_builder, pool_builder, payload_builder, network_builder, @@ -55,6 +68,7 @@ impl ComponentsBuilder ComponentsBuilder ComponentsBuilder ExecB) -> Self { + Self { + pool_builder: self.pool_builder, + payload_builder: self.payload_builder, + network_builder: self.network_builder, + executor_builder: f(self.executor_builder), _marker: self._marker, } } } -impl ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, { @@ -88,16 +116,32 @@ where /// /// This accepts a [PoolBuilder] instance that will be used to create the node's transaction /// pool. 
- pub fn pool(self, pool_builder: PB) -> ComponentsBuilder + pub fn pool( + self, + pool_builder: PB, + ) -> ComponentsBuilder where PB: PoolBuilder, { - let Self { pool_builder: _, payload_builder, network_builder, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } + let Self { + pool_builder: _, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } } } -impl ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -106,57 +150,118 @@ where /// /// This accepts a [NetworkBuilder] instance that will be used to create the node's network /// stack. - pub fn network(self, network_builder: NB) -> ComponentsBuilder + pub fn network( + self, + network_builder: NB, + ) -> ComponentsBuilder where NB: NetworkBuilder, { - let Self { pool_builder, payload_builder, network_builder: _, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } + let Self { + pool_builder, + payload_builder, + network_builder: _, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } } /// Configures the payload builder. /// /// This accepts a [PayloadServiceBuilder] instance that will be used to create the node's /// payload builder service. 
- pub fn payload(self, payload_builder: PB) -> ComponentsBuilder + pub fn payload( + self, + payload_builder: PB, + ) -> ComponentsBuilder where PB: PayloadServiceBuilder, { - let Self { pool_builder, payload_builder: _, network_builder, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } + let Self { + pool_builder, + payload_builder: _, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } + } + + /// Configures the executor builder. + /// + /// This accepts a [ExecutorBuilder] instance that will be used to create the node's components + /// for execution. + pub fn executor( + self, + executor_builder: EB, + ) -> ComponentsBuilder + where + EB: ExecutorBuilder, + { + let Self { pool_builder, payload_builder, network_builder, executor_builder: _, _marker } = + self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder, + _marker, + } } } -impl NodeComponentsBuilder - for ComponentsBuilder +impl NodeComponentsBuilder + for ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, NetworkB: NetworkBuilder, PayloadB: PayloadServiceBuilder, + ExecB: ExecutorBuilder, { - type Components = Components; + type Components = Components; async fn build_components( self, context: &BuilderContext, ) -> eyre::Result { - let Self { pool_builder, payload_builder, network_builder, _marker } = self; + let Self { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + let evm_config = evm_builder.build_evm(context).await?; let pool = pool_builder.build_pool(context).await?; let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; - Ok(Components { transaction_pool: pool, network, 
payload_builder }) + Ok(Components { transaction_pool: pool, evm_config, network, payload_builder }) } } -impl Default for ComponentsBuilder<(), (), (), ()> { +impl Default for ComponentsBuilder<(), (), (), (), ()> { fn default() -> Self { Self { pool_builder: (), payload_builder: (), network_builder: (), + executor_builder: (), _marker: Default::default(), } } @@ -167,9 +272,9 @@ impl Default for ComponentsBuilder<(), (), (), ()> { /// Implementers of this trait are responsible for building all the components of the node: See /// [NodeComponents]. /// -/// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize -/// certain components of the node using the builder pattern and defaults, e.g. Ethereum and -/// Optimism. +/// The [ComponentsBuilder] is a generic, general purpose implementation of this trait that can be +/// used to customize certain components of the node using the builder pattern and defaults, e.g. +/// Ethereum and Optimism. /// A type that's responsible for building the components of the node. pub trait NodeComponentsBuilder: Send { /// The components for the node with the given types @@ -182,14 +287,15 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs new file mode 100644 index 000000000..417423d54 --- /dev/null +++ b/crates/node/builder/src/components/execute.rs @@ -0,0 +1,34 @@ +//! EVM component for the node builder. 
+use crate::{BuilderContext, FullNodeTypes}; +use reth_node_api::ConfigureEvm; +use std::future::Future; + +/// A type that knows how to build the executor types. +pub trait ExecutorBuilder: Send { + /// The EVM config to build. + type EVM: ConfigureEvm; + // TODO(mattsse): integrate `Executor` + + /// Creates the transaction pool. + fn build_evm( + self, + ctx: &BuilderContext, + ) -> impl Future> + Send; +} + +impl ExecutorBuilder for F +where + Node: FullNodeTypes, + EVM: ConfigureEvm, + F: FnOnce(&BuilderContext) -> Fut + Send, + Fut: Future> + Send, +{ + type EVM = EVM; + + fn build_evm( + self, + ctx: &BuilderContext, + ) -> impl Future> { + self(ctx) + } +} diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index ea087ece2..24d83da0d 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -7,8 +7,9 @@ //! //! Components depend on a fully type configured node: [FullNodeTypes](crate::node::FullNodeTypes). -use crate::FullNodeTypes; +use crate::{ConfigureEvm, FullNodeTypes}; pub use builder::*; +pub use execute::*; pub use network::*; pub use payload::*; pub use pool::*; @@ -17,11 +18,13 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; mod builder; +mod execute; mod network; mod payload; mod pool; /// An abstraction over the components of a node, consisting of: +/// - evm and executor /// - transaction pool /// - network /// - payload builder. @@ -29,9 +32,15 @@ pub trait NodeComponents: Clone + Send + Sync + 'stati /// The transaction pool of the node. type Pool: TransactionPool + Unpin; + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + type Evm: ConfigureEvm; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; + /// Returns the node's evm config. 
+ fn evm_config(&self) -> &Self::Evm; + /// Returns the handle to the network fn network(&self) -> &NetworkHandle; @@ -43,26 +52,34 @@ pub trait NodeComponents: Clone + Send + Sync + 'stati /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + pub evm_config: EVM, /// The network implementation of the node. pub network: NetworkHandle, /// The handle to the payload builder service. pub payload_builder: PayloadBuilderHandle, } -impl NodeComponents for Components +impl NodeComponents for Components where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, { type Pool = Pool; + type Evm = EVM; fn pool(&self) -> &Self::Pool { &self.transaction_pool } + fn evm_config(&self) -> &Self::Evm { + &self.evm_config + } + fn network(&self) -> &NetworkHandle { &self.network } @@ -72,14 +89,16 @@ where } } -impl Clone for Components +impl Clone for Components where Node: FullNodeTypes, Pool: TransactionPool, + EVM: ConfigureEvm, { fn clone(&self) -> Self { Self { transaction_pool: self.transaction_pool.clone(), + evm_config: self.evm_config.clone(), network: self.network.clone(), payload_builder: self.payload_builder.clone(), } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 4f1f00e4e..bd81f8386 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -40,6 +40,7 @@ use tokio::sync::{mpsc::unbounded_channel, oneshot}; pub mod common; pub use common::LaunchContext; +use reth_blockchain_tree::noop::NoopBlockchainTree; /// A general purpose trait that launches a new node of any kind. 
/// @@ -83,7 +84,7 @@ where ) -> eyre::Result { let Self { ctx } = self; let NodeBuilderWithComponents { - adapter: NodeTypesAdapter { types, database }, + adapter: NodeTypesAdapter { database }, components_builder, add_ons: NodeAddOns { hooks, rpc, exexs: installed_exex }, config, @@ -124,27 +125,22 @@ where let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); ctx.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener); + // fetch the head block from the database + let head = ctx.lookup_head()?; + // Configure the blockchain tree for the node - let evm_config = types.evm_config(); let tree_config = BlockchainTreeConfig::default(); - let tree_externals = TreeExternals::new( - ctx.provider_factory().clone(), - consensus.clone(), - EvmProcessorFactory::new(ctx.chain_spec(), evm_config.clone()), - ); - let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? - .with_sync_metrics_tx(sync_metrics_tx.clone()); - let canon_state_notification_sender = tree.canon_state_notification_sender(); - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - debug!(target: "reth::cli", "configured blockchain tree"); - - // fetch the head block from the database - let head = ctx.lookup_head()?; + // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. 
This will be removed once the Blockchain provider no longer depends on an instance of the tree: + let (canon_state_notification_sender, _receiver) = + tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - // setup the blockchain provider - let blockchain_db = - BlockchainProvider::new(ctx.provider_factory().clone(), blockchain_tree.clone())?; + let blockchain_db = BlockchainProvider::new( + ctx.provider_factory().clone(), + Arc::new(NoopBlockchainTree::with_canon_state_notifications( + canon_state_notification_sender.clone(), + )), + )?; let builder_ctx = BuilderContext::new( head, @@ -153,19 +149,37 @@ where ctx.data_dir().clone(), ctx.node_config().clone(), ctx.toml_config().clone(), - evm_config.clone(), ); debug!(target: "reth::cli", "creating components"); let components = components_builder.build_components(&builder_ctx).await?; + let tree_externals = TreeExternals::new( + ctx.provider_factory().clone(), + consensus.clone(), + EvmProcessorFactory::new(ctx.chain_spec(), components.evm_config().clone()), + ); + let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? + .with_sync_metrics_tx(sync_metrics_tx.clone()) + // Note: This is required because we need to ensure that both the components and the + // tree are using the same channel for canon state notifications. This will be removed + // once the Blockchain provider no longer depends on an instance of the tree + .with_canon_state_notification_sender(canon_state_notification_sender); + + let canon_state_notification_sender = tree.canon_state_notification_sender(); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + + // Replace the tree component with the actual tree + let blockchain_db = blockchain_db.with_tree(blockchain_tree); + + debug!(target: "reth::cli", "configured blockchain tree"); + let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; let node_adapter = NodeAdapter { components, task_executor: ctx.task_executor().clone(), provider: blockchain_db.clone(), - evm: evm_config.clone(), }; debug!(target: "reth::cli", "calling on_component_initialized hook"); @@ -225,7 +239,7 @@ where }); // send notifications from the blockchain tree to exex manager - let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); + let mut canon_state_notifications = blockchain_db.subscribe_to_canonical_state(); let mut handle = exex_manager_handle.clone(); ctx.task_executor().spawn_critical( "exex manager blockchain tree notifications", @@ -305,7 +319,7 @@ address.to_string(), format_ether(alloc.balance)); consensus_engine_tx.clone(), canon_state_notification_sender, mining_mode, - evm_config.clone(), + node_adapter.components.evm_config().clone(), ) .build(); @@ -320,7 +334,7 @@ address.to_string(), format_ether(alloc.balance)); ctx.prune_config(), max_block, static_file_producer, - evm_config, + node_adapter.components.evm_config().clone(), pipeline_exex_handle, ) .await?; @@ -343,7 +357,7 @@ address.to_string(), format_ether(alloc.balance)); ctx.prune_config(), max_block, static_file_producer, - evm_config, + node_adapter.components.evm_config().clone(), pipeline_exex_handle, ) .await?; @@ -447,7 +461,7 @@ address.to_string(), format_ether(alloc.balance)); }); let full_node = FullNode { - evm_config: node_adapter.evm.clone(), + evm_config: node_adapter.components.evm_config().clone(), pool: node_adapter.components.pool().clone(), network: node_adapter.components.network().clone(), provider: node_adapter.provider.clone(), diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index a65dcfce5..3ac553fa3 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -274,7 +274,7 @@ where .with_network(node.network().clone()) .with_events(node.provider().clone()) .with_executor(node.task_executor().clone()) - 
.with_evm_config(node.evm_config()) + .with_evm_config(node.evm_config().clone()) .build_with_auth_server(module_config, engine_api); let mut registry = RpcRegistry { registry }; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 8f6a3c19b..a7b195f48 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -6,10 +6,13 @@ use crate::{ OptimismEngineTypes, }; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_evm::ConfigureEvm; use reth_evm_optimism::OptimismEvmConfig; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_builder::{ - components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + components::{ + ComponentsBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + }, node::{FullNodeTypes, NodeTypes}, BuilderContext, Node, PayloadBuilderConfig, }; @@ -38,7 +41,13 @@ impl OptimismNode { /// Returns the components for the given [RollupArgs]. 
pub fn components( args: RollupArgs, - ) -> ComponentsBuilder + ) -> ComponentsBuilder< + Node, + OptimismPoolBuilder, + OptimismPayloadBuilder, + OptimismNetworkBuilder, + OptimismExecutorBuilder, + > where Node: FullNodeTypes, { @@ -46,8 +55,12 @@ impl OptimismNode { ComponentsBuilder::default() .node_types::() .pool(OptimismPoolBuilder::default()) - .payload(OptimismPayloadBuilder::new(compute_pending_block)) + .payload(OptimismPayloadBuilder::new( + compute_pending_block, + OptimismEvmConfig::default(), + )) .network(OptimismNetworkBuilder { disable_txpool_gossip }) + .executor(OptimismExecutorBuilder::default()) } } @@ -55,8 +68,13 @@ impl Node for OptimismNode where N: FullNodeTypes, { - type ComponentsBuilder = - ComponentsBuilder; + type ComponentsBuilder = ComponentsBuilder< + N, + OptimismPoolBuilder, + OptimismPayloadBuilder, + OptimismNetworkBuilder, + OptimismExecutorBuilder, + >; fn components_builder(self) -> Self::ComponentsBuilder { let Self { args } = self; @@ -67,10 +85,21 @@ where impl NodeTypes for OptimismNode { type Primitives = (); type Engine = OptimismEngineTypes; - type Evm = OptimismEvmConfig; +} + +/// A regular optimism evm and executor builder. 
+#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct OptimismExecutorBuilder; + +impl ExecutorBuilder for OptimismExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = OptimismEvmConfig; - fn evm_config(&self) -> Self::Evm { - OptimismEvmConfig::default() + async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(OptimismEvmConfig::default()) } } @@ -151,7 +180,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OptimismPayloadBuilder { +pub struct OptimismPayloadBuilder { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -161,19 +190,22 @@ pub struct OptimismPayloadBuilder { /// will use the payload attributes from the latest block. Note /// that this flag is not yet functional. pub compute_pending_block: bool, + /// The EVM configuration to use for the payload builder. + pub evm_config: EVM, } -impl OptimismPayloadBuilder { - /// Create a new instance with the given `compute_pending_block` flag. - pub const fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block } +impl OptimismPayloadBuilder { + /// Create a new instance with the given `compute_pending_block` flag and evm config. 
+ pub const fn new(compute_pending_block: bool, evm_config: EVM) -> Self { + Self { compute_pending_block, evm_config } } } -impl PayloadServiceBuilder for OptimismPayloadBuilder +impl PayloadServiceBuilder for OptimismPayloadBuilder where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, { async fn spawn_payload_service( self, @@ -182,7 +214,7 @@ where ) -> eyre::Result> { let payload_builder = reth_optimism_payload_builder::OptimismPayloadBuilder::new( ctx.chain_spec(), - ctx.evm_config().clone(), + self.evm_config, ) .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index 64f96bd2d..5d26e8bda 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -12,7 +12,7 @@ fn test_basic_setup() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(OptimismNode::default()) + .with_types::() .with_components(OptimismNode::components(Default::default())) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index f58f77dd0..b0f43ba9f 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -89,6 +89,13 @@ impl BlockchainProvider { ) -> Self { Self { database, tree, chain_info: ChainInfoTracker::new(latest) } } + + /// Sets the treeviewer for the provider. 
+ #[doc(hidden)] + pub fn with_tree(mut self, tree: Arc) -> Self { + self.tree = tree; + self + } } impl BlockchainProvider diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 79639e1ba..ada28c0f3 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -37,9 +37,8 @@ use reth_node_api::{ EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{ - node::{EthereumNetworkBuilder, EthereumPoolBuilder}, - EthEvmConfig, +use reth_node_ethereum::node::{ + EthereumExecutorBuilder, EthereumNetworkBuilder, EthereumPoolBuilder, }; use reth_payload_builder::{ error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderHandle, @@ -187,12 +186,6 @@ impl NodeTypes for MyCustomNode { type Primitives = (); // use the custom engine types type Engine = CustomEngineTypes; - // use the default ethereum EVM config - type Evm = EthEvmConfig; - - fn evm_config(&self) -> Self::Evm { - Self::Evm::default() - } } /// Implement the Node trait for the custom node @@ -207,6 +200,7 @@ where EthereumPoolBuilder, CustomPayloadServiceBuilder, EthereumNetworkBuilder, + EthereumExecutorBuilder, >; fn components_builder(self) -> Self::ComponentsBuilder { @@ -215,6 +209,7 @@ where .pool(EthereumPoolBuilder::default()) .payload(CustomPayloadServiceBuilder::default()) .network(EthereumNetworkBuilder::default()) + .executor(EthereumExecutorBuilder::default()) } } diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 6c80c9a74..e5362c808 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -3,7 +3,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use reth::{ - builder::{node::NodeTypes, NodeBuilder}, + builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, primitives::{ 
address, revm_primitives::{CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, @@ -17,9 +17,9 @@ use reth::{ }, tasks::TaskManager, }; -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthereumNode}; +use reth_node_ethereum::{EthEvmConfig, EthereumNode}; use reth_primitives::{Chain, ChainSpec, Genesis, Header, Transaction}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -104,18 +104,19 @@ impl ConfigureEvm for MyEvmConfig { } } -#[derive(Debug, Clone, Default)] +/// A regular ethereum evm and executor builder. +#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] -struct MyCustomNode; +pub struct MyExecutorBuilder; -/// Configure the node types -impl NodeTypes for MyCustomNode { - type Primitives = (); - type Engine = EthEngineTypes; - type Evm = MyEvmConfig; +impl ExecutorBuilder for MyExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = MyEvmConfig; - fn evm_config(&self) -> Self::Evm { - Self::Evm::default() + async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(MyEvmConfig::default()) } } @@ -140,8 +141,8 @@ async fn main() -> eyre::Result<()> { let handle = NodeBuilder::new(node_config) .testing_node(tasks.executor()) - .with_types(MyCustomNode::default()) - .with_components(EthereumNode::components()) + .with_types::() + .with_components(EthereumNode::components().executor(MyExecutorBuilder::default())) .launch() .await .unwrap(); diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 96672807d..a6db90674 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -19,7 +19,7 @@ fn main() { .run(|builder, _| async move { let handle = builder // use the default ethereum node types - .with_types(EthereumNode::default()) 
+ .with_types::() // Configure the components of the node // use default ethereum components but use our custom pool .with_components(EthereumNode::components().pool(CustomPoolBuilder::default())) diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 8e028771b..2c468c34a 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -73,7 +73,7 @@ fn main() { Cli::parse_args() .run(|builder, _| async move { let handle = builder - .with_types(EthereumNode::default()) + .with_types::() // Configure the components of the node // use default ethereum components but use our custom payload builder .with_components( From afbb265b474cc4eb256f7bde2889a01e1a2ea33d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 30 Apr 2024 18:57:00 +0200 Subject: [PATCH 138/250] refactor: use `reth_rpc_types` `BlockId` in optimism rpc (#8003) --- crates/rpc/rpc-api/src/optimism.rs | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/crates/rpc/rpc-api/src/optimism.rs b/crates/rpc/rpc-api/src/optimism.rs index 80d60415d..3ff7c6ce3 100644 --- a/crates/rpc/rpc-api/src/optimism.rs +++ b/crates/rpc/rpc-api/src/optimism.rs @@ -2,18 +2,10 @@ #![allow(unreachable_pub)] use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{Address, BlockNumber, ChainId, B256}; -use reth_rpc_types::BlockNumberOrTag; +use reth_rpc_types::{BlockId, BlockNumberOrTag}; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, net::IpAddr}; -/// todo: move to reth_rpc_types - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlockId { - pub hash: B256, - pub number: BlockNumber, -} - // https://github.com/ethereum-optimism/optimism/blob/develop/op-service/eth/id.go#L33 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -329,19 
+321,19 @@ mod tests { #[test] fn test_output_response() { - let output_response_json = r#"{"version":"0x0000000000000000000000000000000000000000000000000000000000000000","outputRoot":"0xf1119e7d0fef8c54ab799be80fc61f503cea4e5c0aa1cf7ac104ef3a104f3bd1","blockRef":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"hash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824","number":19665136},"sequenceNumber":4},"withdrawalStorageRoot":"0x5c9a29a8ad2ecf97fb4bdea74c715fd2c13fa87d4861414478bc4579601c3585","stateRoot":"0x16849c0a93d00bb2d7ceacda11a1478854d2bbb0a377b4d6793b67a3f05eb6fe","syncStatus":{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"head_l1":{"hash":"0xf98493dcc3d82fe9af339c0a81b0f96172a56764f9abcff464c740e0cb3ccee7","number":19665175,"parentHash":"0xfbab86e5b807916c7ddfa395db794cdf4162128b9770eb8eb829679d81d74328","timestamp":1713235763},"safe_l1":{"hash":"0xfb8f07e551eb65c3282aaefe9a4954c15672e0077b2a5a1db18fcd2126cbc922","number":19665115,"parentHash":"0xfc0d62788fb9cda1cacb54a0e53ca398289436a6b68d1ba69db2942500b4ce5f","timestamp":1713235031},"finalized_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"unsafe_l2":{"hash":"0x3540517a260316758a4872f7626e8b9e009968b6d8cfa9c11bfd3a03e7656bd5","number":118818499,"parentHash":"0x09f30550e6d6f217691e
185bf1a2b4665b83f43fc8dbcc68c0bfd513e6805590","timestamp":1713235775,"l1origin":{"hash":"0x036003c1c6561123a2f6573b7a34e9598bd023199e259d91765ee2c8677d9c07","number":19665170},"sequenceNumber":0},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1}}}"#; + let output_response_json = 
r#"{"version":"0x0000000000000000000000000000000000000000000000000000000000000000","outputRoot":"0xf1119e7d0fef8c54ab799be80fc61f503cea4e5c0aa1cf7ac104ef3a104f3bd1","blockRef":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"blockHash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824"},"sequenceNumber":4},"withdrawalStorageRoot":"0x5c9a29a8ad2ecf97fb4bdea74c715fd2c13fa87d4861414478bc4579601c3585","stateRoot":"0x16849c0a93d00bb2d7ceacda11a1478854d2bbb0a377b4d6793b67a3f05eb6fe","syncStatus":{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"head_l1":{"hash":"0xf98493dcc3d82fe9af339c0a81b0f96172a56764f9abcff464c740e0cb3ccee7","number":19665175,"parentHash":"0xfbab86e5b807916c7ddfa395db794cdf4162128b9770eb8eb829679d81d74328","timestamp":1713235763},"safe_l1":{"hash":"0xfb8f07e551eb65c3282aaefe9a4954c15672e0077b2a5a1db18fcd2126cbc922","number":19665115,"parentHash":"0xfc0d62788fb9cda1cacb54a0e53ca398289436a6b68d1ba69db2942500b4ce5f","timestamp":1713235031},"finalized_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"unsafe_l2":{"hash":"0x3540517a260316758a4872f7626e8b9e009968b6d8cfa9c11bfd3a03e7656bd5","number":118818499,"parentHash":"0x09f30550e6d6f217691e185bf1a2b4665b83f43fc8dbcc68c0bfd513e6805590","timestamp":1713235775,"l1origin":{"blockHash":"0x03600
3c1c6561123a2f6573b7a34e9598bd023199e259d91765ee2c8677d9c07"},"sequenceNumber":0},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1}}}"#; test_helper::(output_response_json); } #[test] fn serialize_sync_status() { - let sync_status_json = 
r#"{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"head_l1":{"hash":"0xfc5ab77c6c08662a3b4d85b8c86010b7aecfc2c0369e4458f80357530db8e919","number":19665141,"parentHash":"0x099792a293002b987f3507524b28614f399b2b5ed607788520963c251844113c","timestamp":1713235355},"safe_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"finalized_l1":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"unsafe_l2":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"hash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824","number":19665136},"sequenceNumber":4},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6
ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1}}"#; + let sync_status_json = r#"{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"head_l1":{"hash":"0xfc5ab77c6c08662a3b4d85b8c86010b7aecfc2c0369e4458f80357530db8e919","number":19665141,"parentHash":"0x099792a293002b987f3507524b28614f399b2b5ed607788520963c251844113c","timestamp":1713235355},"safe_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"finalized_l1":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"unsafe_l2":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"blockHash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824"},"sequenceNumber":4},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash
":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1}}"#; test_helper::(sync_status_json); } #[test] fn test_rollup_config() { - let rollup_config_json = r#"{"genesis":{"l1":{"hash":"0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108","number":17422590},"l2":{"hash":"0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3","number":105235063},"l2_time":1686068903,"system_config":{"batcherAddr":"0x6887246668a3b87f54deb3b94ba47a6f63f32985","overhead":"0x00000000000000000000000000000000000000000000000000000000000000bc","scalar":"0x00000000000000000000000000000000000000000000000000000000000a6fe0","gasLimit":30000000}},"block_time":2,"max_sequencer_drift":600,"seq_window_size":3600,"channel_timeout":300,"l1_chain_id":1,"l2_chain_id":10,"regolith_time":0,"canyon_time":1704992401,"delta_time":1708560000,"ecotone_time":1710374401,"batch_inbox_address":"0xff00000000000000000000000000000000000010","deposit_contract_address":"0xbeb5fc579115071764c7423a4f12edde41f106ed","l1_system_config_address":"0x229047fed2591dbec1ef1118d64f7af3db9eb290","protocol_versions_address":"0x8062abc286f5e7d9428a0ccb9abd71e50d93b935","da_challenge_address":"0x000000000000
0000000000000000000000000000","da_challenge_window":0,"da_resolve_window":0,"use_plasma":false}"#; + let rollup_config_json = r#"{"genesis":{"l1":{"blockHash":"0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108"},"l2":{"blockHash":"0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"},"l2_time":1686068903,"system_config":{"batcherAddr":"0x6887246668a3b87f54deb3b94ba47a6f63f32985","overhead":"0x00000000000000000000000000000000000000000000000000000000000000bc","scalar":"0x00000000000000000000000000000000000000000000000000000000000a6fe0","gasLimit":30000000}},"block_time":2,"max_sequencer_drift":600,"seq_window_size":3600,"channel_timeout":300,"l1_chain_id":1,"l2_chain_id":10,"regolith_time":0,"canyon_time":1704992401,"delta_time":1708560000,"ecotone_time":1710374401,"batch_inbox_address":"0xff00000000000000000000000000000000000010","deposit_contract_address":"0xbeb5fc579115071764c7423a4f12edde41f106ed","l1_system_config_address":"0x229047fed2591dbec1ef1118d64f7af3db9eb290","protocol_versions_address":"0x8062abc286f5e7d9428a0ccb9abd71e50d93b935","da_challenge_address":"0x0000000000000000000000000000000000000000","da_challenge_window":0,"da_resolve_window":0,"use_plasma":false}"#; test_helper::(rollup_config_json); } From e09895257a0bb30d74139a88fbc850b0a655812e Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 30 Apr 2024 11:41:01 -0600 Subject: [PATCH 139/250] feat: add `Deref` impl for `PayloadTaskGuard` (#7971) --- crates/payload/basic/src/lib.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index c32961c72..8da9163d0 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -35,6 +35,7 @@ use revm::{ }; use std::{ future::Future, + ops::Deref, pin::Pin, sync::{atomic::AtomicBool, Arc}, task::{Context, Poll}, @@ -228,6 +229,14 @@ pub struct PrecachedState { #[derive(Debug, Clone)] pub struct 
PayloadTaskGuard(Arc); +impl Deref for PayloadTaskGuard { + type Target = Semaphore; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + // === impl PayloadTaskGuard === impl PayloadTaskGuard { @@ -385,7 +394,7 @@ where let builder = this.builder.clone(); this.executor.spawn_blocking(Box::pin(async move { // acquire the permit for executing the task - let _permit = guard.0.acquire().await; + let _permit = guard.acquire().await; let args = BuildArguments { client, pool, From d532217afbf201bc4cb0c4557e742721af53dcab Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 30 Apr 2024 21:18:19 +0200 Subject: [PATCH 140/250] fix(op): discv5 ENR (#7991) --- crates/net/discv5/src/config.rs | 21 +- crates/net/discv5/src/error.rs | 3 + crates/net/discv5/src/filter.rs | 15 +- crates/net/discv5/src/lib.rs | 39 +- crates/net/discv5/src/metrics.rs | 23 +- crates/net/discv5/src/network_key.rs | 11 - crates/net/discv5/src/network_stack_id.rs | 33 + crates/net/network/src/config.rs | 15 +- crates/primitives/src/chain/mod.rs | 2 +- crates/primitives/src/chain/spec.rs | 23 +- crates/primitives/src/lib.rs | 2 +- etc/grafana/dashboards/reth-discovery.json | 1959 +++++++++++--------- 12 files changed, 1181 insertions(+), 965 deletions(-) delete mode 100644 crates/net/discv5/src/network_key.rs create mode 100644 crates/net/discv5/src/network_stack_id.rs diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 266b530ef..371d40953 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -9,9 +9,9 @@ use std::{ use derive_more::Display; use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; -use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord, MAINNET}; +use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord}; -use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, network_key}; +use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; /// Default 
interval in seconds at which to run a lookup up query. /// @@ -50,7 +50,7 @@ impl ConfigBuilder { let Config { discv5_config, bootstrap_nodes, - fork: (network_key, fork_id), + fork, tcp_port, other_enr_kv_pairs, lookup_interval, @@ -60,7 +60,7 @@ impl ConfigBuilder { Self { discv5_config: Some(discv5_config), bootstrap_nodes, - fork: Some((network_key, fork_id.fork_id)), + fork: fork.map(|(key, fork_id)| (key, fork_id.fork_id)), tcp_port, other_enr_kv_pairs, lookup_interval: Some(lookup_interval), @@ -117,8 +117,8 @@ impl ConfigBuilder { /// Set fork ID kv-pair to set in local [`Enr`](discv5::enr::Enr). This lets peers on discovery /// network know which chain this node belongs to. - pub fn fork(mut self, network_key: &'static [u8], fork_id: ForkId) -> Self { - self.fork = Some((network_key, fork_id)); + pub fn fork(mut self, fork_key: &'static [u8], fork_id: ForkId) -> Self { + self.fork = Some((fork_key, fork_id)); self } @@ -160,13 +160,12 @@ impl ConfigBuilder { let discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); - let (network_key, fork_id) = fork.unwrap_or((network_key::ETH, MAINNET.latest_fork_id())); - let fork = (network_key, fork_id.into()); + let fork = fork.map(|(key, fork_id)| (key, fork_id.into())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); - let discovered_peer_filter = - discovered_peer_filter.unwrap_or_else(|| MustNotIncludeKeys::new(&[network_key::ETH2])); + let discovered_peer_filter = discovered_peer_filter + .unwrap_or_else(|| MustNotIncludeKeys::new(&[NetworkStackId::ETH2])); Config { discv5_config, @@ -190,7 +189,7 @@ pub struct Config { pub(super) bootstrap_nodes: HashSet, /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`. 
- pub(super) fork: (&'static [u8], EnrForkIdEntry), + pub(super) fork: Option<(&'static [u8], EnrForkIdEntry)>, /// RLPx TCP port to advertise. pub(super) tcp_port: u16, /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs index 7e4fa8653..165620898 100644 --- a/crates/net/discv5/src/error.rs +++ b/crates/net/discv5/src/error.rs @@ -11,6 +11,9 @@ pub enum Error { /// Node record has incompatible key type. #[error("incompatible key type (not secp256k1)")] IncompatibleKeyType, + /// No key used to identify rlpx network is configured. + #[error("network stack identifier is not configured")] + NetworkStackIdNotConfigured, /// Missing key used to identify rlpx network. #[error("fork missing on enr, key missing")] ForkMissing(&'static [u8]), diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index f2f2f2fd6..2e20e2fbd 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -96,7 +96,7 @@ mod tests { use alloy_rlp::Bytes; use discv5::enr::{CombinedKey, Enr}; - use crate::network_key::{ETH, ETH2}; + use crate::NetworkStackId; use super::*; @@ -104,16 +104,21 @@ mod tests { fn must_not_include_key_filter() { // rig test - let filter = MustNotIncludeKeys::new(&[ETH, ETH2]); + let filter = MustNotIncludeKeys::new(&[NetworkStackId::ETH, NetworkStackId::ETH2]); // enr_1 advertises a fork from one of the keys configured in filter let sk = CombinedKey::generate_secp256k1(); - let enr_1 = - Enr::builder().add_value_rlp(ETH as &[u8], Bytes::from("cancun")).build(&sk).unwrap(); + let enr_1 = Enr::builder() + .add_value_rlp(NetworkStackId::ETH as &[u8], Bytes::from("cancun")) + .build(&sk) + .unwrap(); // enr_2 advertises a fork from one the other key configured in filter let sk = CombinedKey::generate_secp256k1(); - let enr_2 = Enr::builder().add_value_rlp(ETH2, Bytes::from("deneb")).build(&sk).unwrap(); + let 
enr_2 = Enr::builder() + .add_value_rlp(NetworkStackId::ETH2, Bytes::from("deneb")) + .build(&sk) + .unwrap(); // test diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 5275956bf..14793fab0 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -33,7 +33,7 @@ pub mod enr; pub mod error; pub mod filter; pub mod metrics; -pub mod network_key; +pub mod network_stack_id; pub use discv5::{self, IpMode}; @@ -41,6 +41,7 @@ pub use config::{BootNode, Config, ConfigBuilder}; pub use enr::enr_to_discv4_id; pub use error::Error; pub use filter::{FilterOutcome, MustNotIncludeKeys}; +pub use network_stack_id::NetworkStackId; use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; @@ -75,7 +76,7 @@ pub struct Discv5 { /// [`IpMode`] of the the node. ip_mode: IpMode, /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'. - fork_key: &'static [u8], + fork_key: Option<&'static [u8]>, /// Filter applied to a discovered peers before passing it up to app. discovered_peer_filter: MustNotIncludeKeys, /// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers. @@ -217,7 +218,7 @@ impl Discv5 { fn build_local_enr( sk: &SecretKey, config: &Config, - ) -> (Enr, NodeRecord, &'static [u8], IpMode) { + ) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { let mut builder = discv5::enr::Enr::builder(); let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. 
} = config; @@ -258,8 +259,10 @@ impl Discv5 { }; // identifies which network node is on - let (network, fork_value) = fork; - builder.add_value_rlp(network, alloy_rlp::encode(fork_value).into()); + let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { + builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); + *network_stack_id + }); // add other data for (key, value) in other_enr_kv_pairs { @@ -273,7 +276,7 @@ impl Discv5 { // backwards compatible enr let bc_enr = NodeRecord::from_secret_key(socket, sk); - (enr, bc_enr, network, ip_mode) + (enr, bc_enr, network_stack_id, ip_mode) } /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. @@ -438,8 +441,10 @@ impl Discv5 { return None } - let fork_id = - (self.fork_key == network_key::ETH).then(|| self.get_fork_id(enr).ok()).flatten(); + // todo: extend for all network stacks in reth-network rlpx logic + let fork_id = (self.fork_key == Some(NetworkStackId::ETH)) + .then(|| self.get_fork_id(enr).ok()) + .flatten(); trace!(target: "net::discovery::discv5", ?fork_id, @@ -483,12 +488,13 @@ impl Discv5 { self.discovered_peer_filter.filter(enr) } - /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr), if field is set. + /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network + /// stack, if field is set. fn get_fork_id( &self, enr: &discv5::enr::Enr, ) -> Result { - let key = self.fork_key; + let Some(key) = self.fork_key else { return Err(Error::NetworkStackIdNotConfigured) }; let fork_id = enr .get_decodable::(key) .ok_or(Error::ForkMissing(key))? @@ -519,7 +525,7 @@ impl Discv5 { } /// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr). 
- pub fn fork_key(&self) -> &[u8] { + pub fn fork_key(&self) -> Option<&[u8]> { self.fork_key } } @@ -625,7 +631,7 @@ mod tests { .unwrap(), ), ip_mode: IpMode::Ip4, - fork_key: b"noop", + fork_key: None, discovered_peer_filter: MustNotIncludeKeys::default(), metrics: Discv5Metrics::default(), } @@ -831,13 +837,16 @@ mod tests { const TCP_PORT: u16 = 30303; let fork_id = MAINNET.latest_fork_id(); - let config = Config::builder(TCP_PORT).fork(network_key::ETH, fork_id).build(); + let config = Config::builder(TCP_PORT).fork(NetworkStackId::ETH, fork_id).build(); let sk = SecretKey::new(&mut thread_rng()); let (enr, _, _, _) = Discv5::build_local_enr(&sk, &config); - let decoded_fork_id = - enr.get_decodable::(network_key::ETH).unwrap().map(Into::into).unwrap(); + let decoded_fork_id = enr + .get_decodable::(NetworkStackId::ETH) + .unwrap() + .map(Into::into) + .unwrap(); assert_eq!(fork_id, decoded_fork_id); assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs index 12b024a2f..7bd3572f7 100644 --- a/crates/net/discv5/src/metrics.rs +++ b/crates/net/discv5/src/metrics.rs @@ -2,7 +2,7 @@ use metrics::{Counter, Gauge}; use reth_metrics::Metrics; -use crate::network_key::{ETH, ETH2, OPSTACK}; +use crate::NetworkStackId; /// Information tracked by [`Discv5`](crate::Discv5). #[derive(Debug, Default, Clone)] @@ -91,27 +91,34 @@ impl DiscoveredPeersMetrics { #[derive(Metrics, Clone)] #[metrics(scope = "discv5")] pub struct AdvertisedChainMetrics { - /// Frequency of node records with a kv-pair with [`OPSTACK`](crate::network_key) as + /// Frequency of node records with a kv-pair with [`OPEL`](NetworkStackId::OPEL) as + /// key. + opel: Counter, + + /// Frequency of node records with a kv-pair with [`OPSTACK`](NetworkStackId::OPSTACK) as /// key. opstack: Counter, - /// Frequency of node records with a kv-pair with [`ETH`](crate::network_key) as key. 
+ /// Frequency of node records with a kv-pair with [`ETH`](NetworkStackId::ETH) as key. eth: Counter, - /// Frequency of node records with a kv-pair with [`ETH2`](crate::network_key) as key. + /// Frequency of node records with a kv-pair with [`ETH2`](NetworkStackId::ETH2) as key. eth2: Counter, } impl AdvertisedChainMetrics { - /// Counts each recognised network type that is advertised on node record, once. + /// Counts each recognised network stack type that is advertised on node record, once. pub fn increment_once_by_network_type(&self, enr: &discv5::Enr) { - if enr.get_raw_rlp(OPSTACK).is_some() { + if enr.get_raw_rlp(NetworkStackId::OPEL).is_some() { + self.opel.increment(1u64) + } + if enr.get_raw_rlp(NetworkStackId::OPSTACK).is_some() { self.opstack.increment(1u64) } - if enr.get_raw_rlp(ETH).is_some() { + if enr.get_raw_rlp(NetworkStackId::ETH).is_some() { self.eth.increment(1u64) } - if enr.get_raw_rlp(ETH2).is_some() { + if enr.get_raw_rlp(NetworkStackId::ETH2).is_some() { self.eth2.increment(1u64) } } diff --git a/crates/net/discv5/src/network_key.rs b/crates/net/discv5/src/network_key.rs deleted file mode 100644 index 47576e5b2..000000000 --- a/crates/net/discv5/src/network_key.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Keys of ENR [`ForkId`](reth_primitives::ForkId) kv-pair. Identifies which network a node -//! belongs to. - -/// ENR fork ID kv-pair key, for an Ethereum L1 EL node. -pub const ETH: &[u8] = b"eth"; - -/// ENR fork ID kv-pair key, for an Ethereum L1 CL node. -pub const ETH2: &[u8] = b"eth2"; - -/// ENR fork ID kv-pair key, for an Optimism CL node. -pub const OPSTACK: &[u8] = b"opstack"; diff --git a/crates/net/discv5/src/network_stack_id.rs b/crates/net/discv5/src/network_stack_id.rs new file mode 100644 index 000000000..7bfeff517 --- /dev/null +++ b/crates/net/discv5/src/network_stack_id.rs @@ -0,0 +1,33 @@ +//! Keys of ENR [`ForkId`](reth_primitives::ForkId) kv-pair. Identifies which network stack a node +//! belongs to. 
+ +use reth_primitives::ChainSpec; + +/// Identifies which Ethereum network stack a node belongs to, on the discovery network. +#[derive(Debug)] +pub struct NetworkStackId; + +impl NetworkStackId { + /// ENR fork ID kv-pair key, for an Ethereum L1 EL node. + pub const ETH: &'static [u8] = b"eth"; + + /// ENR fork ID kv-pair key, for an Ethereum L1 CL node. + pub const ETH2: &'static [u8] = b"eth2"; + + /// ENR fork ID kv-pair key, for an Optimism EL node. + pub const OPEL: &'static [u8] = b"opel"; + + /// ENR fork ID kv-pair key, for an Optimism CL node. + pub const OPSTACK: &'static [u8] = b"opstack"; + + /// Returns the [`NetworkStackId`] that matches the given [`ChainSpec`]. + pub fn id(chain: &ChainSpec) -> Option<&'static [u8]> { + if chain.is_optimism() { + return Some(Self::OPEL) + } else if chain.is_eth() { + return Some(Self::ETH) + } + + None + } +} diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 463bde78d..9e898014f 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -9,17 +9,18 @@ use crate::{ NetworkHandle, NetworkManager, }; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; -use reth_discv5::network_key; +use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; use reth_network_types::{pk2id, PeerId}; use reth_primitives::{ - mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NamedChain, NodeRecord, MAINNET, + mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; use std::{collections::HashSet, net::SocketAddr, sync::Arc}; + // re-export for convenience use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocols}; pub use secp256k1::SecretKey; @@ -121,20 +122,16 @@ impl NetworkConfig { f: impl 
FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::Config, ) -> Self { let rlpx_port = self.listener_addr.port(); - let chain = self.chain_spec.chain; + let network_stack_id = NetworkStackId::id(&self.chain_spec); let fork_id = self.chain_spec.latest_fork_id(); let boot_nodes = self.boot_nodes.clone(); let mut builder = reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); - if chain.named() == Some(NamedChain::Mainnet) { - builder = builder.fork(network_key::ETH, fork_id) + if let Some(id) = network_stack_id { + builder = builder.fork(id, fork_id); } - // todo: set op EL fork id - /*if chain.is_optimism() { - builder = builder.fork(network_key::, fork_id) - }*/ self.set_discovery_v5(f(builder)) } diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index f8425f95e..bf60392cd 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -1,4 +1,4 @@ -pub use alloy_chains::{Chain, NamedChain}; +pub use alloy_chains::{Chain, ChainKind, NamedChain}; pub use info::ChainInfo; pub use spec::{ AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 4ce26f84b..cf45cceea 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -4,8 +4,9 @@ use crate::{ net::{goerli_nodes, mainnet_nodes, sepolia_nodes}, proofs::state_root_ref_unhashed, revm_primitives::{address, b256}, - Address, BlockNumber, Chain, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis, Hardfork, - Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, + Address, BlockNumber, Chain, ChainKind, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis, + Hardfork, Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, + U256, }; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; @@ -577,6 +578,24 
@@ impl ChainSpec { self.chain } + /// Returns `true` if this chain contains Ethereum configuration. + #[inline] + pub fn is_eth(&self) -> bool { + matches!( + self.chain.kind(), + ChainKind::Named( + NamedChain::Mainnet | + NamedChain::Morden | + NamedChain::Ropsten | + NamedChain::Rinkeby | + NamedChain::Goerli | + NamedChain::Kovan | + NamedChain::Holesky | + NamedChain::Sepolia + ) + ) + } + /// Returns `true` if this chain contains Optimism configuration. #[inline] pub fn is_optimism(&self) -> bool { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 1c8808628..d20a35164 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -55,7 +55,7 @@ pub use block::{ ForkBlock, RpcBlockHash, SealedBlock, SealedBlockWithSenders, }; pub use chain::{ - AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainSpec, + AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainKind, ChainSpec, ChainSpecBuilder, DisplayHardforks, ForkBaseFeeParams, ForkCondition, ForkTimestamps, NamedChain, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; diff --git a/etc/grafana/dashboards/reth-discovery.json b/etc/grafana/dashboards/reth-discovery.json index 53d71cd76..787913e65 100644 --- a/etc/grafana/dashboards/reth-discovery.json +++ b/etc/grafana/dashboards/reth-discovery.json @@ -1,976 +1,1131 @@ { - "__inputs": [ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.3.3" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": 
{ + "list": [ { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.3.3" + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" + "id": 96, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ { - "builtIn": 1, "datasource": { - "type": "grafana", - "uid": "-- Grafana 
--" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" } - ] + ], + "title": "Version", + "transparent": true, + "type": "stat" }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 96, - "panels": [], - "repeat": "instance", - "repeatDirection": "h", - "title": "Overview", - "type": "row" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 1 - }, - "id": 22, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": 
"reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{version}}", - "range": false, - "refId": "A" - } - ], - "title": "Version", - "transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 3, - "y": 1 - }, - "id": 192, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{build_timestamp}}", - "range": false, - "refId": "A" - } - ], - "title": "Build Timestamp", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", 
+ "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 1 - }, - "id": 193, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{git_sha}}", - "range": false, - "refId": "A" - } - ], - "title": "Git SHA", - "transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": 
"absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 12, - "y": 1 - }, - "id": 195, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{build_profile}}", - "range": false, - "refId": "A" - } - ], - "title": "Build Profile", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true 
- }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 5, - "x": 14, - "y": 1 - }, - "id": 196, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{target_triple}}", - "range": false, - "refId": "A" - } - ], - "title": "Target Triple", - "transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 1 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 5, - "x": 19, - "y": 1 - }, - "id": 197, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": 
"reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{cargo_features}}", - "range": false, - "refId": "A" - } - ], - "title": "Cargo Features", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 4 - }, - "id": 89, - "panels": [], - "repeat": "instance", - "repeatDirection": "h", - "title": "Discv5", - "type": "row" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "description": "Peers managed by underlying sigp/discv5 node. \n\nOnly peers in the kbuckets are queried in FINDNODE lookups, and included in NODES responses to other peers.\n\nNot all peers with an established session will make it into the kbuckets, due to e.g. 
reachability issues (NAT) and capacity of kbuckets furthest log2distance away from local node (XOR metrics).", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 1 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 197, + 
"options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 89, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Discv5", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Peers managed by underlying sigp/discv5 node. \n\nOnly peers in the kbuckets are queried in FINDNODE lookups, and included in NODES responses to other peers.\n\nNot all peers with an established session will make it into the kbuckets, due to e.g. 
reachability issues (NAT) and capacity of kbuckets furthest log2distance away from local node (XOR metrics).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 5 - }, - "id": 198, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true }, - "targets": [ + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total peers kbuckets" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_discv5_total_kbucket_peers_raw{instance=\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total peers kbuckets", - "range": true, - "refId": "A", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": 
"fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total connected sessions" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_discv5_total_sessions_raw{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total connected sessions", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-red", + "mode": "fixed" + } + } + ] } - ], - "title": "Peers", - "type": "timeseries" + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 198, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. 
These peers get passed up the node, to attempt an RLPx connection.\n\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_discv5_total_kbucket_peers_raw{instance=\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total peers kbuckets", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_discv5_total_sessions_raw{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total connected sessions", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. 
These peers get passed up the node, to attempt an RLPx connection.\n\n", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" }, - "unit": "cps", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 5 - }, - "id": 199, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps", + 
"unitScale": true }, - "targets": [ + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total Session Establishments" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_inserted_kbucket_peers_raw{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Total KBucket Insertions", - "range": true, - "refId": "A", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total KBucket Insertions" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Total Session Establishments", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-red", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Session Establishments (pass filter)" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_unreachable_enr{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Session Establishments (unreachable ENR)", - "range": true, - "refId": "C", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff0ada", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Session Establishments (unreachable ENR)" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Session Establishments (pass filter)", - "range": true, - "refId": "D", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-orange", + "mode": "fixed" + } + } + ] } - ], - "title": "Peer Churn", - "type": "timeseries" + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 199, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "description": "Frequency of discovering peers from some popular networks.\n\nSome nodes miss advertising a fork ID kv-pair in their ENR. 
They will be counted as 'unknown', but may belong to a popular network.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_total_inserted_kbucket_peers_raw{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total KBucket Insertions", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total Session Establishments", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_total_established_sessions_unreachable_enr{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + 
"includeNullMetadata": false, + "instant": false, + "legendFormat": "Session Establishments (unreachable ENR)", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Session Establishments (pass filter)", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "Peer Churn", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Frequency of discovering peers from some popular network stacks.\n\nSome nodes miss advertising a fork ID kv-pair in their ENR. 
They will be counted as 'unknown', but may belong to a popular network.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "unit": "cps", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 13 - }, - "id": 200, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Eth", - "range": true, - "refId": "A", - "useBackend": false + "thresholdsStyle": { + "mode": "off" + } }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps", + "unitScale": true + }, + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Eth" }, - "disableTextWrap": false, - "editorMode": 
"code", - "expr": "rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Eth2", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Eth2" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "OP", - "range": true, - "refId": "C", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-red", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Unknown" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]))", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Unknown", - "range": true, - "refId": "D", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff0ae5", + "mode": "fixed" + } + } + ] } - ], - "title": "Advertised Networks", - "type": "timeseries" - } - ], - "refresh": "30s", - "schemaVersion": 39, - "tags": [], - "templating": { - "list": [ + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 200, + "options": { + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eth", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eth2", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "OP EL", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "OP CL", + "range": true, + "refId": "C", + "useBackend": false + }, { - "current": {}, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "query_result(reth_info)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "instance", - "options": [], - "query": { - "query": "query_result(reth_info)", - "refId": 
"PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "/.*instance=\\\"([^\\\"]*).*/", - "skipUrlSync": false, - "sort": 0, - "type": "query" + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Unknown", + "range": true, + "refId": "D", + "useBackend": false } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "reth - discovery", - "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", - "version": 11, - "weekStart": "" - } \ No newline at end of file + ], + "title": "Advertised Network Stacks", + "type": "timeseries" + } + ], + "refresh": "30s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "query_result(reth_info)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "query_result(reth_info)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\\\"([^\\\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "reth - discovery", + "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", + "version": 1, + "weekStart": "" +} \ No newline at end of file From 8e65cb3aa52ef1d621b390d0b1f834f77db9ac60 Mon Sep 17 00:00:00 2001 From: Thomas Coratger 
<60488569+tcoratger@users.noreply.github.com> Date: Tue, 30 Apr 2024 23:07:41 +0200 Subject: [PATCH 141/250] Bump `evm-inspectors` and `alloy` (#8006) --- Cargo.lock | 36 +++++++++---------- Cargo.toml | 26 +++++++------- crates/blockchain-tree/src/blockchain_tree.rs | 6 ++-- crates/consensus/common/src/validation.rs | 4 +-- crates/net/network/tests/it/connect.rs | 22 +++++------- crates/optimism/evm/src/execute.rs | 10 +++--- crates/primitives/src/transaction/eip1559.rs | 2 +- crates/primitives/src/transaction/eip2930.rs | 2 +- crates/primitives/src/transaction/legacy.rs | 2 +- crates/primitives/src/transaction/mod.rs | 28 ++++----------- crates/revm/src/optimism/processor.rs | 10 +++--- crates/revm/src/processor.rs | 4 +-- .../rpc-types-compat/src/transaction/mod.rs | 8 ++--- crates/rpc/rpc/src/eth/api/call.rs | 1 + crates/rpc/rpc/src/eth/api/server.rs | 3 +- crates/storage/codecs/src/alloy/txkind.rs | 2 +- crates/transaction-pool/src/test_utils/gen.rs | 2 +- .../transaction-pool/src/test_utils/mock.rs | 8 ++--- 18 files changed, 78 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c7eca223b..f1ee55cb3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,7 +133,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,7 +166,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -185,7 +185,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "alloy-serde", @@ -208,7 +208,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "serde", @@ -220,7 +220,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-consensus", "alloy-eips", @@ -237,7 +237,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -279,7 +279,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -329,7 +329,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ 
-349,7 +349,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-consensus", "alloy-eips", @@ -371,7 +371,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "alloy-serde", @@ -381,7 +381,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-consensus", "alloy-eips", @@ -401,7 +401,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -413,7 +413,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "serde", @@ -423,7 +423,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = 
"git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", "async-trait", @@ -436,7 +436,7 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-consensus", "alloy-network", @@ -511,7 +511,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -529,7 +529,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=4e22b9e#4e22b9e1de80f1b1cc5dfdcd9461d44b27cf27ca" +source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7944,7 +7944,7 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=848d568#848d5688d0c499c538b9a78b423a7061525aa580" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=3d2077e#3d2077ee665046c256448a8bd90d8e93ea85de56" dependencies = [ "alloy-primitives", "alloy-rpc-types", diff --git a/Cargo.toml b/Cargo.toml index ab330d87e..28b0692dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = 
"https://github.com/paradigmxyz/evm-inspectors", rev = "848d568" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "3d2077e" } # eth alloy-chains = "0.1.15" @@ -291,20 +291,20 @@ alloy-dyn-abi = "0.7.1" alloy-sol-types = "0.7.1" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "4e22b9e" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "4e22b9e" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev 
= "4e22b9e" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "ca54552" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } # misc auto_impl = "1" diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index eee4163c7..09f829c7e 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1285,8 +1285,8 @@ mod tests { revm_primitives::AccountInfo, stage::StageCheckpoint, Account, Address, ChainSpecBuilder, Genesis, GenesisAccount, Header, Signature, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxKind, - Withdrawals, B256, MAINNET, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256, + MAINNET, }; use reth_provider::{ test_utils::{ @@ -1465,7 +1465,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce, gas_limit: 21_000, - to: TxKind::Call(Address::ZERO), + to: Address::ZERO.into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index c6e4e0aee..b67d40e98 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -202,7 +202,7 @@ mod tests { use reth_primitives::{ hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844, - TxKind, Withdrawal, Withdrawals, U256, + Withdrawal, Withdrawals, U256, }; use reth_provider::AccountReader; use std::ops::RangeBounds; @@ -313,7 +313,7 @@ mod tests { 
max_priority_fee_per_gas: 0x28f000fff, max_fee_per_blob_gas: 0x7, gas_limit: 10, - to: TxKind::Call(Address::default()), + to: Address::default().into(), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 1ff59bf40..7b9c785eb 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -1,7 +1,7 @@ //! Connection tests use alloy_node_bindings::Geth; -use alloy_provider::{admin::AdminApi, ProviderBuilder}; +use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; use reth_discv4::Discv4Config; use reth_eth_wire::DisconnectReason; @@ -320,9 +320,8 @@ async fn test_incoming_node_id_blacklist() { let geth = Geth::new().data_dir(temp_dir).disable_discovery().authrpc_port(0).spawn(); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -375,9 +374,8 @@ async fn test_incoming_connect_with_single_geth() { let temp_dir = tempfile::tempdir().unwrap().into_path(); let geth = Geth::new().data_dir(temp_dir).disable_discovery().authrpc_port(0).spawn(); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -438,9 +436,8 @@ async fn test_outgoing_connect_with_single_geth() { let geth_socket = SocketAddr::new([127, 0, 0, 
1].into(), geth_p2p_port); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()).to_string(); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -485,9 +482,8 @@ async fn test_geth_disconnect() { let geth_socket = SocketAddr::new([127, 0, 0, 1].into(), geth_p2p_port); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()).to_string(); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index c56c7622e..a77f42205 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -539,7 +539,7 @@ mod tests { use super::*; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, - Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, + Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, @@ -609,7 +609,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TxKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -618,7 +618,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TxKind::Call(addr), + to: addr.into(), gas_limit: 21_000, ..Default::default() }), @@ -689,7 +689,7 @@ mod tests 
{ chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TxKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -698,7 +698,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TxKind::Call(addr), + to: addr.into(), gas_limit: 21_000, ..Default::default() }), diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 06cbc129c..5da0cd881 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -243,7 +243,7 @@ mod tests { chain_id: 1, nonce: 0x42, gas_limit: 44386, - to: TxKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + to: TxKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), value: U256::ZERO, input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), max_fee_per_gas: 0x4a817c800, diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index b0d1291e8..0604a7888 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -225,7 +225,7 @@ mod tests { nonce: 0, gas_price: 1, gas_limit: 2, - to: TxKind::Call(Address::default()), + to: Address::default().into(), value: U256::from(3), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index f2440e13c..448662a24 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -190,7 +190,7 @@ mod tests { nonce: 0x18, gas_price: 0xfa56ea00, gas_limit: 119902, - to: TxKind::Call( hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), + to: 
TxKind::Call(hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), value: U256::from(0x1c6bf526340000u64), input: hex!("f7d8c88300000000000000000000000000000000000000000000000000000000000cee6100000000000000000000000000000000000000000000000000000000000ac3e1").into(), }); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f401b0ef6..7b79a85a2 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1866,9 +1866,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TxKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::default(), }); @@ -1888,9 +1886,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TxKind::Call(Address::from_slice( - &hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], - )), + to: Address::from_slice(&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..]).into(), value: U256::from(693361000000000u64), input: Default::default(), }); @@ -1909,9 +1905,7 @@ mod tests { nonce: 3, gas_price: 2000000000, gas_limit: 10000000, - to: TxKind::Call(Address::from_slice( - &hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], - )), + to: Address::from_slice(&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..]).into(), value: U256::from(1000000000000000u64), input: Bytes::default(), }); @@ -1931,9 +1925,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000, - to: TxKind::Call(Address::from_slice( - &hex!("61815774383099e24810ab832a5b2a5425c154d5")[..], - )), + to: Address::from_slice(&hex!("61815774383099e24810ab832a5b2a5425c154d5")[..]).into(), value: U256::from(3000000000000000000u64), input: Default::default(), access_list: Default::default(), @@ -1953,9 +1945,7 @@ mod tests { nonce: 
15, gas_price: 2200000000, gas_limit: 34811, - to: TxKind::Call(Address::from_slice( - &hex!("cf7f9e66af820a19257a2108375b180b0ec49167")[..], - )), + to: Address::from_slice(&hex!("cf7f9e66af820a19257a2108375b180b0ec49167")[..]).into(), value: U256::from(1234), input: Bytes::default(), }); @@ -2242,9 +2232,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TxKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::from(input), }); @@ -2291,9 +2279,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TxKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::from(vec![3u8; 64]), }); diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index bd68023be..01ec2efde 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -206,7 +206,7 @@ mod tests { }; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Header, Signature, StorageKey, - StorageValue, Transaction, TransactionSigned, TxEip1559, TxKind, BASE_MAINNET, + StorageValue, Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, }; use revm::L1_BLOCK_CONTRACT; use std::{collections::HashMap, str::FromStr, sync::Arc}; @@ -278,7 +278,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TxKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -287,7 +287,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TxKind::Call(addr), + to: addr.into(), gas_limit: 21_000, 
..Default::default() }), @@ -352,7 +352,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TxKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -361,7 +361,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TxKind::Call(addr), + to: addr.into(), gas_limit: 21_000, ..Default::default() }), diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index e6a85b77d..c22272abc 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -466,7 +466,7 @@ mod tests { bytes, constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, - TxEip1559, TxKind, MAINNET, + TxEip1559, MAINNET, }; use revm::{Database, TransitionState}; use std::collections::HashMap; @@ -855,7 +855,7 @@ mod tests { chain_id, nonce: 1, gas_limit: 21_000, - to: TxKind::Call(Address::ZERO), + to: Address::ZERO.into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index a441c4c29..6a35429c5 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -1,9 +1,7 @@ //! Compatibility functions for rpc `Transaction` type. 
use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; -use reth_primitives::{ - BlockNumber, TransactionSignedEcRecovered, TxKind as PrimitiveTransactionKind, TxType, B256, -}; +use reth_primitives::{BlockNumber, TransactionSignedEcRecovered, TxKind, TxType, B256}; use reth_rpc_types::Transaction; use signature::from_primitive_signature; pub use typed::*; @@ -45,8 +43,8 @@ fn fill( let signed_tx = tx.into_signed(); let to = match signed_tx.kind() { - PrimitiveTransactionKind::Create => None, - PrimitiveTransactionKind::Call(to) => Some(*to), + TxKind::Create => None, + TxKind::Call(to) => Some(*to), }; #[allow(unreachable_patterns)] diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 7066f7372..191406f96 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -443,6 +443,7 @@ where Ok(AccessListWithGasUsed { access_list, gas_used }) } + /// Executes the requests again after an out of gas error to check if the error is gas related /// or not #[inline] diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index c2be79a10..7ba1539b8 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -429,8 +429,6 @@ where #[cfg(test)] mod tests { - use jsonrpsee::types::error::INVALID_PARAMS_CODE; - use crate::{ eth::{ cache::EthStateCache, gas_oracle::GasPriceOracle, FeeHistoryCache, @@ -438,6 +436,7 @@ mod tests { }, EthApi, }; + use jsonrpsee::types::error::INVALID_PARAMS_CODE; use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::test_utils::{generators, generators::Rng}; use reth_network_api::noop::NoopNetwork; diff --git a/crates/storage/codecs/src/alloy/txkind.rs b/crates/storage/codecs/src/alloy/txkind.rs index 220384bdd..e1dffa15b 100644 --- a/crates/storage/codecs/src/alloy/txkind.rs +++ b/crates/storage/codecs/src/alloy/txkind.rs @@ -21,7 +21,7 @@ impl Compact for TxKind { 0 => (TxKind::Create, buf), 1 
=> { let (addr, buf) = Address::from_compact(buf, buf.len()); - (TxKind::Call(addr), buf) + (addr.into(), buf) } _ => { unreachable!("Junk data in database: unknown TransactionKind variant",) diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 2e3c71828..5c335e5d6 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -306,7 +306,7 @@ impl TransactionBuilder { /// Sets the recipient or contract address for the transaction, mutable reference version. pub fn set_to(&mut self, to: Address) -> &mut Self { - self.to = TxKind::Call(to); + self.to = to.into(); self } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 7eda40e58..8e265e7ba 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -213,7 +213,7 @@ impl MockTransaction { nonce: 0, gas_price: 0, gas_limit: 0, - to: TxKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Default::default(), size: Default::default(), @@ -229,7 +229,7 @@ impl MockTransaction { max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, gas_limit: 0, - to: TxKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Bytes::new(), accesslist: Default::default(), @@ -247,7 +247,7 @@ impl MockTransaction { max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128, gas_limit: 0, - to: TxKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Bytes::new(), accesslist: Default::default(), @@ -272,7 +272,7 @@ impl MockTransaction { hash: B256::random(), sender: Address::random(), nonce: 0, - to: TxKind::Call(Address::random()), + to: Address::random().into(), gas_limit: 0, input: 
Bytes::new(), value: Default::default(), From bf9d9745edcdfedf226ef9e6b93ae93ca256a86e Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 30 Apr 2024 20:20:23 -0400 Subject: [PATCH 142/250] fix(op): use canyon,ecotone fork timestamps for op mainnet (#8011) --- crates/primitives/src/chain/spec.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index cf45cceea..d0a5b8433 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -296,6 +296,8 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { ), (Hardfork::Bedrock, ForkCondition::Block(105235063)), (Hardfork::Regolith, ForkCondition::Timestamp(0)), + (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), + (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)), ]), base_fee_params: BaseFeeParamsKind::Variable( vec![ @@ -2469,6 +2471,25 @@ Post-merge hard forks (timestamp based): ); } + #[cfg(feature = "optimism")] + #[test] + fn op_mainnet_forkids() { + test_fork_ids( + &OP_MAINNET, + &[ + ( + Head { number: 0, ..Default::default() }, + ForkId { hash: ForkHash([0xca, 0xf5, 0x17, 0xed]), next: 3950000 }, + ), + // TODO: complete these, see https://github.com/paradigmxyz/reth/issues/8012 + ( + Head { number: 105235063, timestamp: 1710374401, ..Default::default() }, + ForkId { hash: ForkHash([0x19, 0xda, 0x4c, 0x52]), next: 0 }, + ), + ], + ); + } + #[cfg(feature = "optimism")] #[test] fn base_sepolia_forkids() { From 074c5c301373cec1a0b755e213abf973293a0014 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 30 Apr 2024 22:06:37 -0400 Subject: [PATCH 143/250] feat: introduce external context GAT in ConfigureEvm (#7842) --- crates/ethereum/evm/src/lib.rs | 12 +++++++++++- crates/evm/src/lib.rs | 11 +++++++---- crates/optimism/evm/src/lib.rs | 6 ++++-- crates/revm/src/test_utils.rs | 17 
+++++++++++------ examples/custom-evm/src/main.rs | 4 +++- 5 files changed, 36 insertions(+), 14 deletions(-) diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index a320a2b3c..adcfd700d 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -14,6 +14,7 @@ use reth_primitives::{ revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, Address, ChainSpec, Head, Header, Transaction, U256, }; +use reth_revm::{Database, EvmBuilder}; pub mod execute; /// Ethereum-related EVM configuration. @@ -55,7 +56,16 @@ impl ConfigureEvmEnv for EthEvmConfig { } } -impl ConfigureEvm for EthEvmConfig {} +impl ConfigureEvm for EthEvmConfig { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>( + &self, + db: DB, + ) -> reth_revm::Evm<'a, Self::DefaultExternalContext<'a>, DB> { + EvmBuilder::default().with_db(db).build() + } +} #[cfg(test)] mod tests { diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 9179abc33..154aac2d7 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -16,14 +16,15 @@ pub mod execute; /// Trait for configuring the EVM for executing full blocks. pub trait ConfigureEvm: ConfigureEvmEnv { + /// Associated type for the default external context that should be configured for the EVM. + type DefaultExternalContext<'a>; + /// Returns new EVM with the given database /// /// This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { - EvmBuilder::default().with_db(db).build() - } + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. 
@@ -33,7 +34,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { &self, db: DB, env: EnvWithHandlerCfg, - ) -> Evm<'a, (), DB> { + ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { let mut evm = self.evm(db); evm.modify_spec_id(env.spec_id()); evm.context.evm.env = env.env; @@ -43,6 +44,8 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// + /// This will use the given external inspector as the EVM external context. + /// /// This will preserve any handler modifications fn evm_with_env_and_inspector<'a, DB, I>( &self, diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 6a6324302..8ab6fd426 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -61,7 +61,9 @@ impl ConfigureEvmEnv for OptimismEvmConfig { } impl ConfigureEvm for OptimismEvmConfig { - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default().with_db(db).optimism().build() } @@ -83,7 +85,7 @@ impl ConfigureEvm for OptimismEvmConfig { mod tests { use super::*; use reth_primitives::revm_primitives::{BlockEnv, CfgEnv}; - use reth_revm::primitives::SpecId; + use revm_primitives::SpecId; #[test] #[ignore] diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 193736987..73df4ea4b 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -18,11 +18,12 @@ use std::collections::HashMap; #[cfg(feature = "optimism")] use { reth_primitives::revm::env::fill_op_tx_env, - revm::{ - inspector_handle_register, - primitives::{HandlerCfg, SpecId}, - Database, Evm, EvmBuilder, GetInspector, - }, + revm::{inspector_handle_register, GetInspector}, +}; + +use revm::{ + primitives::{HandlerCfg, SpecId}, + Database, Evm, EvmBuilder, }; 
/// Mock state for testing @@ -158,9 +159,13 @@ impl ConfigureEvmEnv for TestEvmConfig { } impl ConfigureEvm for TestEvmConfig { - #[cfg(feature = "optimism")] + type DefaultExternalContext<'a> = (); + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + #[cfg(feature = "optimism")] let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }; + #[cfg(not(feature = "optimism"))] + let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST }; EvmBuilder::default().with_db(db).with_handler_cfg(handler_cfg).build() } diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index e5362c808..31edf4f03 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -81,7 +81,9 @@ impl ConfigureEvmEnv for MyEvmConfig { } impl ConfigureEvm for MyEvmConfig { - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default() .with_db(db) // add additional precompiles From fb960fb3e45e11c24125ccb4bd93f2e2e21ce271 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Anda=20Estensen?= Date: Wed, 1 May 2024 10:20:53 +0200 Subject: [PATCH 144/250] refactor: remove _args prefix from modules in node_core (#8015) --- .../args/{database_args.rs => database.rs} | 0 .../src/args/{debug_args.rs => debug.rs} | 0 .../src/args/{dev_args.rs => dev.rs} | 0 ...ice_oracle_args.rs => gas_price_oracle.rs} | 0 .../src/args/{log_args.rs => log.rs} | 0 crates/node-core/src/args/mod.rs | 48 +++++++++---------- .../src/args/{network_args.rs => network.rs} | 0 ...oad_builder_args.rs => payload_builder.rs} | 0 .../src/args/{pruning_args.rs => pruning.rs} | 0 .../{rpc_server_args.rs => rpc_server.rs} | 0 ...state_cache_args.rs => rpc_state_cache.rs} | 0 .../src/args/{stage_args.rs => stage.rs} | 0 .../src/args/{txpool_args.rs => txpool.rs} | 0 13 files changed, 24 
insertions(+), 24 deletions(-) rename crates/node-core/src/args/{database_args.rs => database.rs} (100%) rename crates/node-core/src/args/{debug_args.rs => debug.rs} (100%) rename crates/node-core/src/args/{dev_args.rs => dev.rs} (100%) rename crates/node-core/src/args/{gas_price_oracle_args.rs => gas_price_oracle.rs} (100%) rename crates/node-core/src/args/{log_args.rs => log.rs} (100%) rename crates/node-core/src/args/{network_args.rs => network.rs} (100%) rename crates/node-core/src/args/{payload_builder_args.rs => payload_builder.rs} (100%) rename crates/node-core/src/args/{pruning_args.rs => pruning.rs} (100%) rename crates/node-core/src/args/{rpc_server_args.rs => rpc_server.rs} (100%) rename crates/node-core/src/args/{rpc_state_cache_args.rs => rpc_state_cache.rs} (100%) rename crates/node-core/src/args/{stage_args.rs => stage.rs} (100%) rename crates/node-core/src/args/{txpool_args.rs => txpool.rs} (100%) diff --git a/crates/node-core/src/args/database_args.rs b/crates/node-core/src/args/database.rs similarity index 100% rename from crates/node-core/src/args/database_args.rs rename to crates/node-core/src/args/database.rs diff --git a/crates/node-core/src/args/debug_args.rs b/crates/node-core/src/args/debug.rs similarity index 100% rename from crates/node-core/src/args/debug_args.rs rename to crates/node-core/src/args/debug.rs diff --git a/crates/node-core/src/args/dev_args.rs b/crates/node-core/src/args/dev.rs similarity index 100% rename from crates/node-core/src/args/dev_args.rs rename to crates/node-core/src/args/dev.rs diff --git a/crates/node-core/src/args/gas_price_oracle_args.rs b/crates/node-core/src/args/gas_price_oracle.rs similarity index 100% rename from crates/node-core/src/args/gas_price_oracle_args.rs rename to crates/node-core/src/args/gas_price_oracle.rs diff --git a/crates/node-core/src/args/log_args.rs b/crates/node-core/src/args/log.rs similarity index 100% rename from crates/node-core/src/args/log_args.rs rename to 
crates/node-core/src/args/log.rs diff --git a/crates/node-core/src/args/mod.rs b/crates/node-core/src/args/mod.rs index 14b63dd74..bce63917b 100644 --- a/crates/node-core/src/args/mod.rs +++ b/crates/node-core/src/args/mod.rs @@ -1,55 +1,55 @@ //! Parameters for configuring the rpc more granularity via CLI /// NetworkArg struct for configuring the network -mod network_args; -pub use network_args::{DiscoveryArgs, NetworkArgs}; +mod network; +pub use network::{DiscoveryArgs, NetworkArgs}; /// RpcServerArg struct for configuring the RPC -mod rpc_server_args; -pub use rpc_server_args::RpcServerArgs; +mod rpc_server; +pub use rpc_server::RpcServerArgs; /// RpcStateCacheArgs struct for configuring RPC state cache -mod rpc_state_cache_args; -pub use rpc_state_cache_args::RpcStateCacheArgs; +mod rpc_state_cache; +pub use rpc_state_cache::RpcStateCacheArgs; /// DebugArgs struct for debugging purposes -mod debug_args; -pub use debug_args::DebugArgs; +mod debug; +pub use debug::DebugArgs; /// DatabaseArgs struct for configuring the database -mod database_args; -pub use database_args::DatabaseArgs; +mod database; +pub use database::DatabaseArgs; /// LogArgs struct for configuring the logger -mod log_args; -pub use log_args::{ColorMode, LogArgs}; +mod log; +pub use log::{ColorMode, LogArgs}; mod secret_key; pub use secret_key::{get_secret_key, SecretKeyError}; /// PayloadBuilderArgs struct for configuring the payload builder -mod payload_builder_args; -pub use payload_builder_args::PayloadBuilderArgs; +mod payload_builder; +pub use payload_builder::PayloadBuilderArgs; /// Stage related arguments -mod stage_args; -pub use stage_args::StageEnum; +mod stage; +pub use stage::StageEnum; /// Gas price oracle related arguments -mod gas_price_oracle_args; -pub use gas_price_oracle_args::GasPriceOracleArgs; +mod gas_price_oracle; +pub use gas_price_oracle::GasPriceOracleArgs; /// TxPoolArgs for configuring the transaction pool -mod txpool_args; -pub use txpool_args::TxPoolArgs; +mod 
txpool; +pub use txpool::TxPoolArgs; /// DevArgs for configuring the dev testnet -mod dev_args; -pub use dev_args::DevArgs; +mod dev; +pub use dev::DevArgs; /// PruneArgs for configuring the pruning and full node -mod pruning_args; -pub use pruning_args::PruningArgs; +mod pruning; +pub use pruning::PruningArgs; pub mod utils; diff --git a/crates/node-core/src/args/network_args.rs b/crates/node-core/src/args/network.rs similarity index 100% rename from crates/node-core/src/args/network_args.rs rename to crates/node-core/src/args/network.rs diff --git a/crates/node-core/src/args/payload_builder_args.rs b/crates/node-core/src/args/payload_builder.rs similarity index 100% rename from crates/node-core/src/args/payload_builder_args.rs rename to crates/node-core/src/args/payload_builder.rs diff --git a/crates/node-core/src/args/pruning_args.rs b/crates/node-core/src/args/pruning.rs similarity index 100% rename from crates/node-core/src/args/pruning_args.rs rename to crates/node-core/src/args/pruning.rs diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server.rs similarity index 100% rename from crates/node-core/src/args/rpc_server_args.rs rename to crates/node-core/src/args/rpc_server.rs diff --git a/crates/node-core/src/args/rpc_state_cache_args.rs b/crates/node-core/src/args/rpc_state_cache.rs similarity index 100% rename from crates/node-core/src/args/rpc_state_cache_args.rs rename to crates/node-core/src/args/rpc_state_cache.rs diff --git a/crates/node-core/src/args/stage_args.rs b/crates/node-core/src/args/stage.rs similarity index 100% rename from crates/node-core/src/args/stage_args.rs rename to crates/node-core/src/args/stage.rs diff --git a/crates/node-core/src/args/txpool_args.rs b/crates/node-core/src/args/txpool.rs similarity index 100% rename from crates/node-core/src/args/txpool_args.rs rename to crates/node-core/src/args/txpool.rs From f832b66f996dbf38106b68666c79bc02cd6902ad Mon Sep 17 00:00:00 2001 From: Matthias 
Seitz Date: Wed, 1 May 2024 13:24:46 +0200 Subject: [PATCH 145/250] chore: rm uneccessary trait bounds (#8019) --- crates/rpc/rpc/src/layers/auth_layer.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/rpc/rpc/src/layers/auth_layer.rs b/crates/rpc/rpc/src/layers/auth_layer.rs index 0137fcd0c..ed22d607c 100644 --- a/crates/rpc/rpc/src/layers/auth_layer.rs +++ b/crates/rpc/rpc/src/layers/auth_layer.rs @@ -44,11 +44,7 @@ pub struct AuthLayer { validator: V, } -impl AuthLayer -where - V: AuthValidator, - V::ResponseBody: Body, -{ +impl AuthLayer { /// Creates an instance of [`AuthLayer`]. /// `validator` is a generic trait able to validate requests (see [`AuthValidator`]). pub fn new(validator: V) -> Self { From 99db2b352fc3a1279229b7fb8fc9689c2c098be8 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 1 May 2024 15:44:50 +0200 Subject: [PATCH 146/250] feat(op): nonce replay (#7781) --- bin/reth/src/cli/mod.rs | 7 +- bin/reth/src/commands/import.rs | 265 +++++++++-------- bin/reth/src/commands/import_op.rs | 274 ++++++++++++++++++ bin/reth/src/commands/mod.rs | 1 + crates/net/downloaders/src/file_client.rs | 17 ++ crates/primitives/src/stage/id.rs | 5 + crates/stages/src/stages/tx_lookup.rs | 17 +- .../src/providers/database/provider.rs | 5 + 8 files changed, 460 insertions(+), 131 deletions(-) create mode 100644 bin/reth/src/commands/import_op.rs diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 9c81b0aec..e7d278964 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -6,7 +6,8 @@ use crate::{ LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, node, node::NoArgs, + config_cmd, db, debug_cmd, dump_genesis, import, import_op, init_cmd, init_state, + node::{self, NoArgs}, p2p, recover, stage, test_vectors, }, version::{LONG_VERSION, SHORT_VERSION}, @@ -147,6 +148,7 @@ impl Cli { Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), 
Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), @@ -183,6 +185,9 @@ pub enum Commands { /// This syncs RLP encoded blocks from a file. #[command(name = "import")] Import(import::ImportCommand), + /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. + #[command(name = "import-op")] + ImportOp(import_op::ImportOpCommand), /// Dumps genesis block JSON configuration to stdout. DumpGenesis(dump_genesis::DumpGenesisCommand), /// Database debugging utilities diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 4731bf565..e1851f51e 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -14,7 +14,7 @@ use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; -use reth_db::{database::Database, init_db}; +use reth_db::{database::Database, init_db, tables, transaction::DbTx}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, @@ -30,8 +30,8 @@ use reth_node_ethereum::EthEvmConfig; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{ - BlockNumReader, HeaderProvider, HeaderSyncMode, ProviderError, ProviderFactory, - StageCheckpointReader, StaticFileProviderFactory, + BlockNumReader, ChainSpecProvider, HeaderProvider, HeaderSyncMode, ProviderError, + ProviderFactory, 
StageCheckpointReader, StaticFileProviderFactory, }; use reth_stages::{ prelude::*, @@ -41,7 +41,7 @@ use reth_stages::{ use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; -use tracing::{debug, info}; +use tracing::{debug, error, info}; /// Stages that require state. const STATE_STAGES: &[StageId] = &[ @@ -87,11 +87,6 @@ pub struct ImportCommand { #[arg(long, verbatim_doc_comment)] no_state: bool, - /// Import OP Mainnet chain below Bedrock. Caution! Flag must be set as env var, since the env - /// var is read by another process too, in order to make below Bedrock import work. - #[arg(long, verbatim_doc_comment, env = "OP_RETH_MAINNET_BELOW_BEDROCK")] - op_mainnet_below_bedrock: bool, - /// Chunk byte length. #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, @@ -109,27 +104,23 @@ pub struct ImportCommand { impl ImportCommand { /// Execute `import` command - pub async fn execute(mut self) -> eyre::Result<()> { + pub async fn execute(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); - if self.op_mainnet_below_bedrock { - self.no_state = true; - debug!(target: "reth::cli", "Importing OP mainnet below bedrock"); - } - if self.no_state { - debug!(target: "reth::cli", "Stages requiring state disabled"); + info!(target: "reth::cli", "Disabled stages requiring state"); } debug!(target: "reth::cli", - chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), "Chunking chain import" + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking chain import" ); // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); - let mut config: Config = self.load_config(config_path.clone())?; + let mut config: Config = load_config(config_path.clone())?; info!(target: "reth::cli", path = 
?config_path, "Configuration loaded"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to @@ -155,6 +146,9 @@ impl ImportCommand { // open file let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + let mut total_decoded_blocks = 0; + let mut total_decoded_txns = 0; + while let Some(file_client) = reader.next_chunk().await? { // create a new FileClient from chunk read from file info!(target: "reth::cli", @@ -164,20 +158,22 @@ impl ImportCommand { let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; info!(target: "reth::cli", "Chain file chunk read"); - let (mut pipeline, events) = self - .build_import_pipeline( - &config, + total_decoded_blocks += file_client.headers_len(); + total_decoded_txns += file_client.total_transactions(); + + let (mut pipeline, events) = build_import_pipeline( + &config, + provider_factory.clone(), + &consensus, + Arc::new(file_client), + StaticFileProducer::new( provider_factory.clone(), - &consensus, - Arc::new(file_client), - StaticFileProducer::new( - provider_factory.clone(), - provider_factory.static_file_provider(), - PruneModes::default(), - ), - self.no_state, - ) - .await?; + provider_factory.static_file_provider(), + PruneModes::default(), + ), + true, + ) + .await?; // override the tip pipeline.set_tip(tip); @@ -202,104 +198,129 @@ impl ImportCommand { } } - info!(target: "reth::cli", "Chain file imported"); - Ok(()) - } + let provider = provider_factory.provider()?; - async fn build_import_pipeline( - &self, - config: &Config, - provider_factory: ProviderFactory, - consensus: &Arc, - file_client: Arc, - static_file_producer: StaticFileProducer, - no_state: bool, - ) -> eyre::Result<(Pipeline, impl Stream)> - where - DB: Database + Clone + Unpin + 'static, - C: Consensus + 'static, - { - if !file_client.has_canonical_blocks() { - eyre::bail!("unable to import non canonical blocks"); - } + let total_imported_blocks = provider.tx_ref().entries::()?; + let 
total_imported_txns = provider.tx_ref().entries::()?; - // Retrieve latest header found in the database. - let last_block_number = provider_factory.last_block_number()?; - let local_head = provider_factory - .sealed_header(last_block_number)? - .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; - - let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(file_client.clone(), consensus.clone()) - .into_task(); - // TODO: The pipeline should correctly configure the downloader on its own. - // Find the possibility to remove unnecessary pre-configuration. - header_downloader.update_local_head(local_head); - header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); - - let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) - .build(file_client.clone(), consensus.clone(), provider_factory.clone()) - .into_task(); - // TODO: The pipeline should correctly configure the downloader on its own. - // Find the possibility to remove unnecessary pre-configuration. 
- body_downloader - .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) - .expect("failed to set download range"); - - let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); - - let max_block = file_client.max_block().unwrap_or(0); - - let mut pipeline = Pipeline::builder() - .with_tip_sender(tip_tx) - // we want to sync all blocks the file client provides or 0 if empty - .with_max_block(max_block) - .add_stages( - DefaultStages::new( - provider_factory.clone(), - HeaderSyncMode::Tip(tip_rx), - consensus.clone(), - header_downloader, - body_downloader, - factory.clone(), - config.stages.etl.clone(), - ) - .set(SenderRecoveryStage { - commit_threshold: config.stages.sender_recovery.commit_threshold, - }) - .set(ExecutionStage::new( - factory, - ExecutionStageThresholds { - max_blocks: config.stages.execution.max_blocks, - max_changes: config.stages.execution.max_changes, - max_cumulative_gas: config.stages.execution.max_cumulative_gas, - max_duration: config.stages.execution.max_duration, - }, - config - .stages - .merkle - .clean_threshold - .max(config.stages.account_hashing.clean_threshold) - .max(config.stages.storage_hashing.clean_threshold), - config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), - ExExManagerHandle::empty(), - )) - .disable_all_if(STATE_STAGES, || no_state), - ) - .build(provider_factory, static_file_producer); + if total_decoded_blocks != total_imported_blocks || + total_decoded_txns != total_imported_txns + { + error!(target: "reth::cli", + total_decoded_blocks, + total_imported_blocks, + total_decoded_txns, + total_imported_txns, + "Chain was partially imported" + ); + } - let events = pipeline.events().map(Into::into); + info!(target: "reth::cli", + total_imported_blocks, + total_imported_txns, + "Chain file imported" + ); - Ok((pipeline, events)) + Ok(()) } +} - /// Loads the reth 
config - fn load_config(&self, config_path: PathBuf) -> eyre::Result { - confy::load_path::(config_path.clone()) - .wrap_err_with(|| format!("Could not load config file {config_path:?}")) +/// Builds import pipeline. +/// +/// If configured to execute, all stages will run. Otherwise, only stages that don't require state +/// will run. +pub async fn build_import_pipeline( + config: &Config, + provider_factory: ProviderFactory, + consensus: &Arc, + file_client: Arc, + static_file_producer: StaticFileProducer, + should_exec: bool, +) -> eyre::Result<(Pipeline, impl Stream)> +where + DB: Database + Clone + Unpin + 'static, + C: Consensus + 'static, +{ + if !file_client.has_canonical_blocks() { + eyre::bail!("unable to import non canonical blocks"); } + + // Retrieve latest header found in the database. + let last_block_number = provider_factory.last_block_number()?; + let local_head = provider_factory + .sealed_header(last_block_number)? + .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; + + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) + .build(file_client.clone(), consensus.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + header_downloader.update_local_head(local_head); + header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); + + let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) + .build(file_client.clone(), consensus.clone(), provider_factory.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. 
+ body_downloader + .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) + .expect("failed to set download range"); + + let (tip_tx, tip_rx) = watch::channel(B256::ZERO); + let factory = + reth_revm::EvmProcessorFactory::new(provider_factory.chain_spec(), EthEvmConfig::default()); + + let max_block = file_client.max_block().unwrap_or(0); + + let mut pipeline = Pipeline::builder() + .with_tip_sender(tip_tx) + // we want to sync all blocks the file client provides or 0 if empty + .with_max_block(max_block) + .add_stages( + DefaultStages::new( + provider_factory.clone(), + HeaderSyncMode::Tip(tip_rx), + consensus.clone(), + header_downloader, + body_downloader, + factory.clone(), + config.stages.etl.clone(), + ) + .set(SenderRecoveryStage { + commit_threshold: config.stages.sender_recovery.commit_threshold, + }) + .set(ExecutionStage::new( + factory, + ExecutionStageThresholds { + max_blocks: config.stages.execution.max_blocks, + max_changes: config.stages.execution.max_changes, + max_cumulative_gas: config.stages.execution.max_cumulative_gas, + max_duration: config.stages.execution.max_duration, + }, + config + .stages + .merkle + .clean_threshold + .max(config.stages.account_hashing.clean_threshold) + .max(config.stages.storage_hashing.clean_threshold), + config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), + ExExManagerHandle::empty(), + )) + .disable_all_if(STATE_STAGES, || should_exec), + ) + .build(provider_factory, static_file_producer); + + let events = pipeline.events().map(Into::into); + + Ok((pipeline, events)) +} + +/// Loads the reth config +pub fn load_config(config_path: PathBuf) -> eyre::Result { + confy::load_path::(config_path.clone()) + .wrap_err_with(|| format!("Could not load config file {config_path:?}")) } #[cfg(test)] diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs new file mode 100644 index 000000000..02620f47f --- /dev/null +++ 
b/bin/reth/src/commands/import_op.rs @@ -0,0 +1,274 @@ +//! Command that initializes the node by importing a chain from a file. + +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + commands::import::{build_import_pipeline, load_config}, + dirs::{DataDirPath, MaybePlatformPath}, + version::SHORT_VERSION, +}; +use clap::Parser; +use reth_beacon_consensus::BeaconConsensus; +use reth_config::{config::EtlConfig, Config}; + +use reth_db::{init_db, tables, transaction::DbTx}; +use reth_downloaders::file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}; + +use reth_node_core::init::init_genesis; + +use reth_primitives::{hex, stage::StageId, ChainSpec, PruneModes, TxHash}; +use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; +use reth_static_file::StaticFileProducer; +use std::{path::PathBuf, sync::Arc}; + +use tracing::{debug, error, info}; + +/// Syncs RLP encoded blocks from a file. +#[derive(Debug, Parser)] +pub struct ImportOpCommand { + /// The path to the configuration file to use. + #[arg(long, value_name = "FILE", verbatim_doc_comment)] + config: Option, + + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = chain_help(), + default_value = SUPPORTED_CHAINS[0], + value_parser = genesis_value_parser + )] + chain: Arc, + + /// Chunk byte length. 
+ #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + + #[command(flatten)] + db: DatabaseArgs, + + /// The path to a block file for import. + /// + /// The online stages (headers and bodies) are replaced by a file import, after which the + /// remaining stages are executed. + #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl ImportOpCommand { + /// Execute `import` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); + + info!(target: "reth::cli", + "Disabled stages requiring state, since cannot execute OVM state changes" + ); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking chain import" + ); + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + + let mut config: Config = load_config(config_path.clone())?; + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + + // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + if config.stages.etl.dir.is_none() { + config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + } + + let db_path = data_dir.db_path(); + + info!(target: "reth::cli", path = ?db_path, "Opening database"); + let db = Arc::new(init_db(db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); + let provider_factory = + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + + debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); + + init_genesis(provider_factory.clone())?; + + let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + info!(target: "reth::cli", "Consensus engine initialized"); + + // open file 
+ let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + + let mut total_decoded_blocks = 0; + let mut total_decoded_txns = 0; + let mut total_filtered_out_dup_txns = 0; + + while let Some(mut file_client) = reader.next_chunk().await? { + // create a new FileClient from chunk read from file + info!(target: "reth::cli", + "Importing chain file chunk" + ); + + let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; + info!(target: "reth::cli", "Chain file chunk read"); + + total_decoded_blocks += file_client.headers_len(); + total_decoded_txns += file_client.bodies_len(); + + for (block_number, body) in file_client.bodies_iter_mut() { + body.transactions.retain(|tx| { + if is_duplicate(tx.hash, *block_number) { + total_filtered_out_dup_txns += 1; + return false + } + true + }) + } + + let (mut pipeline, events) = build_import_pipeline( + &config, + provider_factory.clone(), + &consensus, + Arc::new(file_client), + StaticFileProducer::new( + provider_factory.clone(), + provider_factory.static_file_provider(), + PruneModes::default(), + ), + false, + ) + .await?; + + // override the tip + pipeline.set_tip(tip); + debug!(target: "reth::cli", ?tip, "Tip manually set"); + + let provider = provider_factory.provider()?; + + let latest_block_number = + provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); + tokio::spawn(reth_node_events::node::handle_events( + None, + latest_block_number, + events, + db.clone(), + )); + + // Run pipeline + info!(target: "reth::cli", "Starting sync pipeline"); + tokio::select! 
{ + res = pipeline.run() => res?, + _ = tokio::signal::ctrl_c() => {}, + } + } + + let provider = provider_factory.provider()?; + + let total_imported_blocks = provider.tx_ref().entries::()?; + let total_imported_txns = provider.tx_ref().entries::()?; + + if total_decoded_blocks != total_imported_blocks || + total_decoded_txns != total_imported_txns + { + error!(target: "reth::cli", + total_decoded_blocks, + total_imported_blocks, + total_decoded_txns, + total_imported_txns, + "Chain was partially imported" + ); + } + + info!(target: "reth::cli", + total_imported_blocks, + total_imported_txns, + "Chain file imported" + ); + + Ok(()) + } +} + +/// A transaction that has been replayed in chain below Bedrock. +#[derive(Debug)] +pub struct ReplayedTx { + tx_hash: TxHash, + original_block: u64, +} + +impl ReplayedTx { + /// Returns a new instance. + pub const fn new(tx_hash: TxHash, original_block: u64) -> Self { + Self { tx_hash, original_block } + } +} + +/// Transaction 0x9ed8..9cb9, first seen in block 985. +pub const TX_BLOCK_985: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9")), + 985, +); + +/// Transaction 0xc033..6cb6, first seen in block 123 322. +pub const TX_BLOCK_123_322: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("c033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6")), + 123_322, +); + +/// Transaction 0x86f8..76e5, first seen in block 1 133 328. +pub const TX_BLOCK_1_133_328: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5")), + 1_133_328, +); + +/// Transaction 0x3cc2..cd4e, first seen in block 1 244 152. +pub const TX_BLOCK_1_244_152: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e")), + 1_244_152, +); + +/// List of original occurrences of all duplicate transactions below Bedrock.
+pub const TX_DUP_ORIGINALS: [ReplayedTx; 4] = + [TX_BLOCK_985, TX_BLOCK_123_322, TX_BLOCK_1_133_328, TX_BLOCK_1_244_152]; + +/// Returns `true` if transaction is the second or third appearance of the transaction. +pub fn is_duplicate(tx_hash: TxHash, block_number: u64) -> bool { + for ReplayedTx { tx_hash: dup_tx_hash, original_block } in TX_DUP_ORIGINALS { + if tx_hash == dup_tx_hash && block_number != original_block { + return true + } + } + false +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_common_import_command_chain_args() { + for chain in SUPPORTED_CHAINS { + let args: ImportOpCommand = + ImportOpCommand::parse_from(["reth", "--chain", chain, "."]); + assert_eq!( + Ok(args.chain.chain), + chain.parse::(), + "failed to parse chain {chain}" + ); + } + } +} diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index 03d5a8287..a005d5e8b 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -5,6 +5,7 @@ pub mod db; pub mod debug_cmd; pub mod dump_genesis; pub mod import; +pub mod import_op; pub mod init_cmd; pub mod init_state; diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 362ed3c40..ee783a1a4 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -222,6 +222,23 @@ impl FileClient { pub fn bodies_len(&self) -> usize { self.bodies.len() } + + /// Returns an iterator over headers in the client. + pub fn headers_iter(&mut self) -> impl Iterator { + self.headers.values() + } + + /// Returns a mutable iterator over bodies in the client. + pub fn bodies_iter_mut(&mut self) -> impl Iterator { + let bodies = &mut self.bodies; + let headers = &self.headers; + headers.keys().zip(bodies.values_mut()) + } + + /// Returns the current number of transactions in the client. 
+ pub fn total_transactions(&self) -> usize { + self.bodies.iter().flat_map(|(_, body)| &body.transactions).count() + } } impl HeadersClient for FileClient { diff --git a/crates/primitives/src/stage/id.rs b/crates/primitives/src/stage/id.rs index 2f5de34ee..d4926fea1 100644 --- a/crates/primitives/src/stage/id.rs +++ b/crates/primitives/src/stage/id.rs @@ -79,6 +79,11 @@ impl StageId { matches!(self, StageId::Headers | StageId::Bodies) } + /// Returns `true` if it's [TransactionLookup](StageId::TransactionLookup) stage. + pub fn is_tx_lookup(&self) -> bool { + matches!(self, StageId::TransactionLookup) + } + /// Returns true indicating if it's the finish stage [StageId::Finish] pub fn is_finish(&self) -> bool { matches!(self, StageId::Finish) diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 101c52258..342183905 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -153,18 +153,19 @@ impl Stage for TransactionLookupStage { ); } + let key = RawKey::::from_vec(hash); if append_only { - txhash_cursor.append( - RawKey::::from_vec(hash), - RawValue::::from_vec(number), - )?; + txhash_cursor.append(key, RawValue::::from_vec(number))? } else { - txhash_cursor.insert( - RawKey::::from_vec(hash), - RawValue::::from_vec(number), - )?; + txhash_cursor.insert(key, RawValue::::from_vec(number))? } } + + trace!(target: "sync::stages::transaction_lookup", + total_hashes, + "Transaction hashes inserted" + ); + break } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index c96a05937..2cae000ce 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -354,6 +354,11 @@ impl DatabaseProvider { |_| true, ) } + + /// Returns a reference to the [`ChainSpec`]. 
+ pub fn chain_spec(&self) -> &ChainSpec { + &self.chain_spec + } } impl DatabaseProvider { From 99924e4244cc829b00866d73f398f720daee78b5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 1 May 2024 15:59:46 +0200 Subject: [PATCH 147/250] perf(build): make maxperf-op (#7967) --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index fd5a252f1..5ac3bb468 100644 --- a/Makefile +++ b/Makefile @@ -263,6 +263,10 @@ update-book-cli: ## Update book cli documentation. maxperf: ## Builds `reth` with the most aggressive optimisations. RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak +.PHONY: maxperf-op +maxperf-op: ## Builds `op-reth` with the most aggressive optimisations. + RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak,optimism --bin op-reth + .PHONY: maxperf-no-asm maxperf-no-asm: ## Builds `reth` with the most aggressive optimisations, minus the "asm-keccak" feature. 
RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc From c1f5b45bbd07ef21f97f23be65b84f18bf2e0647 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 1 May 2024 16:46:50 +0200 Subject: [PATCH 148/250] fix(grafana): tx pool dashboard units (#8020) --- etc/grafana/dashboards/reth-mempool.json | 46 +++++++++--------------- 1 file changed, 17 insertions(+), 29 deletions(-) diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index 07212ac3b..3ba499a9a 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -573,7 +573,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -726,7 +726,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -858,7 +858,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -1638,8 +1638,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1773,8 +1772,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1904,8 +1902,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2024,8 +2021,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2144,8 +2140,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2367,8 +2362,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2487,8 +2481,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2624,8 +2617,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ 
-2793,8 +2785,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2889,8 +2880,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3032,8 +3022,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3340,8 +3329,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3425,8 +3413,8 @@ }, "timepicker": {}, "timezone": "", - "title": "reth - mempool", + "title": "Reth - Transaction Pool", "uid": "bee34f59-c79c-4669-a000-198057b3703d", - "version": 1, + "version": 3, "weekStart": "" } \ No newline at end of file From f157ec83b6ead4dc7bebf41a080193dca2ccd8fd Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 1 May 2024 16:59:42 +0200 Subject: [PATCH 149/250] chore: remove redundant suffix in ChainPath methods (#8025) --- bin/reth/src/commands/db/mod.rs | 4 +- bin/reth/src/commands/db/static_files/mod.rs | 5 +-- bin/reth/src/commands/db/stats.rs | 4 +- .../src/commands/debug_cmd/build_block.rs | 6 +-- bin/reth/src/commands/debug_cmd/execution.rs | 12 +++--- .../commands/debug_cmd/in_memory_merkle.rs | 10 ++--- bin/reth/src/commands/debug_cmd/merkle.rs | 10 ++--- .../src/commands/debug_cmd/replay_engine.rs | 10 ++--- bin/reth/src/commands/import.rs | 8 ++-- bin/reth/src/commands/import_op.rs | 8 ++-- bin/reth/src/commands/init_cmd.rs | 4 +- bin/reth/src/commands/init_state.rs | 4 +- bin/reth/src/commands/node/mod.rs | 10 ++--- bin/reth/src/commands/p2p/mod.rs | 6 +-- .../src/commands/recover/storage_tries.rs | 4 +- bin/reth/src/commands/stage/drop.rs | 4 +- bin/reth/src/commands/stage/dump/execution.rs | 8 +--- .../commands/stage/dump/hashing_account.rs | 8 +--- .../commands/stage/dump/hashing_storage.rs | 8 +--- bin/reth/src/commands/stage/dump/merkle.rs | 8 +--- 
bin/reth/src/commands/stage/dump/mod.rs | 4 +- bin/reth/src/commands/stage/run.rs | 21 ++++------ bin/reth/src/commands/stage/unwind.rs | 10 ++--- crates/node-core/src/dirs.rs | 40 ++++++++++--------- crates/node-core/src/node_config.rs | 4 +- crates/node-ethereum/src/node.rs | 4 +- crates/node/builder/src/builder/mod.rs | 2 +- crates/node/builder/src/launch/common.rs | 8 ++-- crates/optimism/node/src/node.rs | 4 +- examples/custom-node-components/src/main.rs | 2 +- 30 files changed, 110 insertions(+), 130 deletions(-) diff --git a/bin/reth/src/commands/db/mod.rs b/bin/reth/src/commands/db/mod.rs index f28f8375f..aeaf1d7e8 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/bin/reth/src/commands/db/mod.rs @@ -108,9 +108,9 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); let db_args = self.db.database_args(); - let static_files_path = data_dir.static_files_path(); + let static_files_path = data_dir.static_files(); match self.command { // TODO: We'll need to add this on the DB trait. 
diff --git a/bin/reth/src/commands/db/static_files/mod.rs b/bin/reth/src/commands/db/static_files/mod.rs index 9391db76c..8f5930e10 100644 --- a/bin/reth/src/commands/db/static_files/mod.rs +++ b/bin/reth/src/commands/db/static_files/mod.rs @@ -96,11 +96,10 @@ impl Command { }); let db = open_db_read_only( - data_dir.db_path().as_path(), + data_dir.db().as_path(), db_args.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)), )?; - let provider_factory = - Arc::new(ProviderFactory::new(db, chain, data_dir.static_files_path())?); + let provider_factory = Arc::new(ProviderFactory::new(db, chain, data_dir.static_files())?); { if !self.only_bench { diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index 5ffc136dd..474603c74 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -174,8 +174,8 @@ impl Command { ]); } - let static_files = iter_static_files(data_dir.static_files_path())?; - let static_file_provider = StaticFileProvider::new(data_dir.static_files_path())?; + let static_files = iter_static_files(data_dir.static_files())?; + let static_file_provider = StaticFileProvider::new(data_dir.static_files())?; let mut total_data_size = 0; let mut total_index_size = 0; diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index aee51ee79..9d5942ae1 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -114,7 +114,7 @@ impl Command { let factory = ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?; let provider = factory.provider()?; @@ -148,7 +148,7 @@ impl Command { pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { // add network name to data dir let data_dir = 
self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database @@ -156,7 +156,7 @@ impl Command { let provider_factory = ProviderFactory::new( Arc::clone(&db), Arc::clone(&self.chain), - data_dir.static_files_path(), + data_dir.static_files(), )?; let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index df6b4d111..a83ea19fd 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -173,7 +173,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) .start_network() .await?; @@ -206,17 +206,17 @@ impl Command { let mut config = Config::default(); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { - config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(provider_factory.clone())?; @@ -225,14 +225,14 @@ impl Command { // Configure and build network let network_secret_path = - 
self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index f13b503f1..e68231a76 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -94,7 +94,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) .start_network() .await?; @@ -109,12 +109,12 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; let provider = factory.provider()?; // Look up merkle checkpoint @@ -126,14 +126,14 @@ impl Command { // Configure and build network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 
d806306d4..07075ff26 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -104,7 +104,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) .start_network() .await?; @@ -119,24 +119,24 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; let provider_rw = factory.provider_rw()?; // Configure and build network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index f59af6218..1360c2f1b 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -101,7 +101,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) 
.start_network() .await?; @@ -116,13 +116,13 @@ impl Command { // Add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // Initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); @@ -146,14 +146,14 @@ impl Command { // Set up network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index e1851f51e..3c191d8bb 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -118,23 +118,23 @@ impl ImportCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = load_config(config_path.clone())?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { - config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } - let db_path = data_dir.db_path(); + let db_path 
= data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 02620f47f..1c5a74015 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -85,23 +85,23 @@ impl ImportOpCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = load_config(config_path.clone())?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { - config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); diff --git 
a/bin/reth/src/commands/init_cmd.rs b/bin/reth/src/commands/init_cmd.rs index 7a2988ebd..bdd8acb52 100644 --- a/bin/reth/src/commands/init_cmd.rs +++ b/bin/reth/src/commands/init_cmd.rs @@ -51,12 +51,12 @@ impl InitCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(&db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); - let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?; + let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?; info!(target: "reth::cli", "Writing genesis block"); diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs index c05f064b3..fa70264e5 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/bin/reth/src/commands/init_state.rs @@ -72,12 +72,12 @@ impl InitStateCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(&db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); - let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?; + let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?; info!(target: "reth::cli", "Writing genesis block"); diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index 5f95c534d..9f2a4d67a 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -180,7 +180,7 @@ impl NodeCommand { let _ = node_config.install_prometheus_recorder()?; let data_dir = datadir.unwrap_or_chain_default(node_config.chain.chain); 
- let db_path = data_dir.db_path(); + let db_path = data_dir.db(); tracing::info!(target: "reth::cli", path = ?db_path, "Opening database"); let database = Arc::new(init_db(db_path.clone(), self.db.database_args())?.with_metrics()); @@ -280,14 +280,14 @@ mod tests { NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let config_path = cmd.config.unwrap_or_else(|| data_dir.config_path()); + let config_path = cmd.config.unwrap_or_else(|| data_dir.config()); assert_eq!(config_path, Path::new("my/path/to/reth.toml")); let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config()); let end = format!("reth/{}/reth.toml", SUPPORTED_CHAINS[0]); assert!(config_path.ends_with(end), "{:?}", cmd.config); } @@ -296,14 +296,14 @@ mod tests { fn parse_db_path() { let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]); assert!(db_path.ends_with(end), "{:?}", cmd.config); let cmd = NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); assert_eq!(db_path, Path::new("my/custom/path/db")); } diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index b67881e64..35d111e57 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ 
b/bin/reth/src/commands/p2p/mod.rs @@ -105,7 +105,7 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = confy::load_path(&config_path).unwrap_or_default(); @@ -119,7 +119,7 @@ impl Command { config.peers.trusted_nodes_only = self.trusted_only; - let default_secret_key_path = data_dir.p2p_secret_path(); + let default_secret_key_path = data_dir.p2p_secret(); let secret_key_path = self.p2p_secret_key.clone().unwrap_or(default_secret_key_path); let p2p_secret_key = get_secret_key(&secret_key_path)?; @@ -133,7 +133,7 @@ impl Command { let mut network_config = network_config_builder.build(Arc::new(ProviderFactory::new( noop_db, self.chain.clone(), - data_dir.static_files_path(), + data_dir.static_files(), )?)); if self.discovery.enable_discv5_discovery { diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/bin/reth/src/commands/recover/storage_tries.rs index 7a1c2ccc2..025a170a0 100644 --- a/bin/reth/src/commands/recover/storage_tries.rs +++ b/bin/reth/src/commands/recover/storage_tries.rs @@ -50,11 +50,11 @@ impl Command { /// Execute `storage-tries` recovery command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(factory.clone())?; diff --git 
a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index 5c1440602..625a3f36b 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -54,12 +54,12 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; let db = open_db(db_path.as_ref(), self.db.database_args())?; let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; let static_file_provider = provider_factory.static_file_provider(); let tool = DbTool::new(provider_factory, self.chain.clone())?; diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/bin/reth/src/commands/stage/dump/execution.rs index 7d2d8f0ba..571ce486a 100644 --- a/bin/reth/src/commands/stage/dump/execution.rs +++ b/bin/reth/src/commands/stage/dump/execution.rs @@ -20,7 +20,7 @@ pub(crate) async fn dump_execution_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; import_tables_with_range(&output_db, db_tool, from, to)?; @@ -28,11 +28,7 @@ pub(crate) async fn dump_execution_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) diff --git a/bin/reth/src/commands/stage/dump/hashing_account.rs b/bin/reth/src/commands/stage/dump/hashing_account.rs index 35bbfa4d7..2f28ba129 100644 --- a/bin/reth/src/commands/stage/dump/hashing_account.rs +++ 
b/bin/reth/src/commands/stage/dump/hashing_account.rs @@ -15,7 +15,7 @@ pub(crate) async fn dump_hashing_account_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; // Import relevant AccountChangeSets output_db.update(|tx| { @@ -30,11 +30,7 @@ pub(crate) async fn dump_hashing_account_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) diff --git a/bin/reth/src/commands/stage/dump/hashing_storage.rs b/bin/reth/src/commands/stage/dump/hashing_storage.rs index a7e387880..7d38892dc 100644 --- a/bin/reth/src/commands/stage/dump/hashing_storage.rs +++ b/bin/reth/src/commands/stage/dump/hashing_storage.rs @@ -15,17 +15,13 @@ pub(crate) async fn dump_hashing_storage_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; unwind_and_copy(db_tool, from, tip_block_number, &output_db)?; if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs index 2dfd0172b..55a8ec76d 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/bin/reth/src/commands/stage/dump/merkle.rs @@ -24,7 +24,7 @@ pub(crate) async fn dump_merkle_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = 
setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; output_db.update(|tx| { tx.import_table_with_range::( @@ -46,11 +46,7 @@ pub(crate) async fn dump_merkle_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) diff --git a/bin/reth/src/commands/stage/dump/mod.rs b/bin/reth/src/commands/stage/dump/mod.rs index 4e1cace6e..fa4184356 100644 --- a/bin/reth/src/commands/stage/dump/mod.rs +++ b/bin/reth/src/commands/stage/dump/mod.rs @@ -102,11 +102,11 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; info!(target: "reth::cli", "Database opened"); diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 66fb25b47..d798c87d1 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -130,23 +130,20 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let config: Config = confy::load_path(config_path).unwrap_or_default(); info!(target: "reth::cli", "reth {} starting stage {:?}", SHORT_VERSION, self.stage); // use the overridden db 
path if specified - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); - let factory = ProviderFactory::new( - Arc::clone(&db), - self.chain.clone(), - data_dir.static_files_path(), - )?; + let factory = + ProviderFactory::new(Arc::clone(&db), self.chain.clone(), data_dir.static_files())?; let mut provider_rw = factory.provider_rw()?; if let Some(listen_addr) = self.metrics { @@ -165,9 +162,7 @@ impl Command { let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1); let etl_config = EtlConfig::new( - Some( - self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(&data_dir.data_dir_path())), - ), + Some(self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(data_dir.data_dir()))), self.etl_file_size.unwrap_or(EtlConfig::default_file_size()), ); @@ -188,15 +183,15 @@ impl Command { .network .p2p_secret_key .clone() - .unwrap_or_else(|| data_dir.p2p_secret_path()); + .unwrap_or_else(|| data_dir.p2p_secret()); let p2p_secret_key = get_secret_key(&network_secret_path)?; - let default_peers_path = data_dir.known_peers_path(); + let default_peers_path = data_dir.known_peers(); let provider_factory = Arc::new(ProviderFactory::new( db.clone(), self.chain.clone(), - data_dir.static_files_path(), + data_dir.static_files(), )?); let network = self diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 9ffaad979..0c4260c0c 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -83,16 +83,16 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); if !db_path.exists() { eyre::bail!("Database {db_path:?} does 
not exist.") } - let config_path = data_dir.config_path(); + let config_path = data_dir.config(); let config: Config = confy::load_path(config_path).unwrap_or_default(); let db = Arc::new(open_db(db_path.as_ref(), self.db.database_args())?); let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; let range = self.command.unwind_range(provider_factory.clone())?; if *range.start() == 0 { @@ -148,9 +148,9 @@ impl Command { // Even though we are not planning to download anything, we need to initialize Body and // Header stage with a network client let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let p2p_secret_key = get_secret_key(&network_secret_path)?; - let default_peers_path = data_dir.known_peers_path(); + let default_peers_path = data_dir.known_peers(); let network = self .network .network_config( diff --git a/crates/node-core/src/dirs.rs b/crates/node-core/src/dirs.rs index 223e65bb2..75919f6f0 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node-core/src/dirs.rs @@ -271,63 +271,65 @@ impl ChainPath { /// Returns the path to the reth data directory for this chain. /// /// `/` - pub fn data_dir_path(&self) -> PathBuf { - self.0.as_ref().into() + pub fn data_dir(&self) -> &Path { + self.0.as_ref() } /// Returns the path to the db directory for this chain. /// /// `//db` - pub fn db_path(&self) -> PathBuf { - self.0.join("db").into() + pub fn db(&self) -> PathBuf { + self.data_dir().join("db") } /// Returns the path to the static_files directory for this chain. 
- pub fn static_files_path(&self) -> PathBuf { - self.0.join("static_files").into() + /// + /// `//static_files` + pub fn static_files(&self) -> PathBuf { + self.data_dir().join("static_files") } /// Returns the path to the reth p2p secret key for this chain. /// /// `//discovery-secret` - pub fn p2p_secret_path(&self) -> PathBuf { - self.0.join("discovery-secret").into() + pub fn p2p_secret(&self) -> PathBuf { + self.data_dir().join("discovery-secret") } /// Returns the path to the known peers file for this chain. /// /// `//known-peers.json` - pub fn known_peers_path(&self) -> PathBuf { - self.0.join("known-peers.json").into() + pub fn known_peers(&self) -> PathBuf { + self.data_dir().join("known-peers.json") } /// Returns the path to the blobstore directory for this chain where blobs of unfinalized /// transactions are stored. /// /// `//blobstore` - pub fn blobstore_path(&self) -> PathBuf { - self.0.join("blobstore").into() + pub fn blobstore(&self) -> PathBuf { + self.data_dir().join("blobstore") } /// Returns the path to the local transactions backup file /// /// `//txpool-transactions-backup.rlp` - pub fn txpool_transactions_path(&self) -> PathBuf { - self.0.join("txpool-transactions-backup.rlp").into() + pub fn txpool_transactions(&self) -> PathBuf { + self.data_dir().join("txpool-transactions-backup.rlp") } /// Returns the path to the config file for this chain. /// /// `//reth.toml` - pub fn config_path(&self) -> PathBuf { - self.0.join("reth.toml").into() + pub fn config(&self) -> PathBuf { + self.data_dir().join("reth.toml") } /// Returns the path to the jwtsecret file for this chain. 
/// /// `//jwt.hex` - pub fn jwt_path(&self) -> PathBuf { - self.0.join("jwt.hex").into() + pub fn jwt(&self) -> PathBuf { + self.data_dir().join("jwt.hex") } } @@ -359,7 +361,7 @@ mod tests { let path = path.unwrap_or_chain_default(Chain::mainnet()); assert!(path.as_ref().ends_with("reth/mainnet"), "{path:?}"); - let db_path = path.db_path(); + let db_path = path.db(); assert!(db_path.ends_with("reth/mainnet/db"), "{db_path:?}"); let path = MaybePlatformPath::::from_str("my/path/to/datadir").unwrap(); diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index c25395e07..411a8b447 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -234,7 +234,7 @@ impl NodeConfig { /// Get the network secret from the given data dir pub fn network_secret(&self, data_dir: &ChainPath) -> eyre::Result { let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); debug!(target: "reth::cli", ?network_secret_path, "Loading p2p key file"); let secret_key = get_secret_key(&network_secret_path)?; Ok(secret_key) @@ -299,7 +299,7 @@ impl NodeConfig { ) -> eyre::Result> { info!(target: "reth::cli", "Connecting to P2P network"); let secret_key = self.network_secret(data_dir)?; - let default_peers_path = data_dir.known_peers_path(); + let default_peers_path = data_dir.known_peers(); Ok(self.load_network_config(config, client, executor, head, secret_key, default_peers_path)) } diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 4f52027b4..9de0cbe6e 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -100,7 +100,7 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let data_dir = ctx.data_dir(); - let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?; + let blob_store 
= DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) @@ -114,7 +114,7 @@ where let transaction_pool = reth_transaction_pool::Pool::eth_pool(validator, blob_store, ctx.pool_config()); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 815b13858..8c899df15 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -533,7 +533,7 @@ impl BuilderContext { self.executor.spawn_critical("p2p txpool", txpool); self.executor.spawn_critical("p2p eth request handler", eth); - let default_peers_path = self.data_dir().known_peers_path(); + let default_peers_path = self.data_dir().known_peers(); let known_peers_file = self.config.network.persistent_peers_file(default_peers_path); self.executor.spawn_critical_with_graceful_shutdown_signal( "p2p network task", diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 28453a047..63060f647 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -61,7 +61,7 @@ impl LaunchContext { /// Loads the reth config with the configured `data_dir` and overrides settings according to the /// `config`. 
pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { - let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path()); + let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config()); let mut toml_config = confy::load_path::(&config_path) .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; @@ -192,7 +192,7 @@ impl LaunchContextWith> { pub fn ensure_etl_datadir(mut self) -> Self { if self.toml_config_mut().stages.etl.dir.is_none() { self.toml_config_mut().stages.etl.dir = - Some(EtlConfig::from_datadir(&self.data_dir().data_dir_path())) + Some(EtlConfig::from_datadir(self.data_dir().data_dir())) } self @@ -273,7 +273,7 @@ impl LaunchContextWith> { /// Loads the JWT secret for the engine API pub fn auth_jwt_secret(&self) -> eyre::Result { - let default_jwt_path = self.data_dir().jwt_path(); + let default_jwt_path = self.data_dir().jwt(); let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?; Ok(secret) } @@ -299,7 +299,7 @@ where let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), - self.data_dir().static_files_path(), + self.data_dir().static_files(), )? 
.with_static_files_metrics(); diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index a7b195f48..c95f3dd95 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -119,7 +119,7 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let data_dir = ctx.data_dir(); - let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?; + let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) .with_head_timestamp(ctx.head().timestamp) @@ -139,7 +139,7 @@ where ctx.pool_config(), ); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index a6db90674..ac98de7af 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -64,7 +64,7 @@ where let transaction_pool = reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.pool_config); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { From 399afd802c2ed17dcbc8fb7b0219185ca73bde3f Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 1 May 2024 17:22:49 +0200 Subject: [PATCH 150/250] feat(node_builder): allow borrowing self in `ConfigureEvm::evm` (#8024) --- crates/consensus/auto-seal/src/lib.rs | 2 +- crates/consensus/auto-seal/src/task.rs | 2 +- crates/ethereum/evm/src/execute.rs | 2 +- crates/evm/src/lib.rs | 11 ++-- crates/node/builder/src/builder/mod.rs | 5 ++ 
crates/node/builder/src/components/execute.rs | 2 +- crates/optimism/evm/src/execute.rs | 2 +- crates/revm/src/factory.rs | 7 +-- crates/revm/src/optimism/processor.rs | 3 +- crates/revm/src/processor.rs | 58 ++++++++----------- examples/exex/rollup/src/main.rs | 3 +- 11 files changed, 46 insertions(+), 51 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index c09dcbcc8..402a6c983 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -426,7 +426,7 @@ impl StorageInner { withdrawals: Option, client: &impl StateProviderFactory, chain_spec: Arc, - evm_config: EvmConfig, + evm_config: &EvmConfig, ) -> Result<(SealedHeader, BundleStateWithReceipts), BlockExecutionError> where EvmConfig: ConfigureEvm, diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 6009cd810..7e2a700ef 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -145,7 +145,7 @@ where withdrawals.clone(), &client, chain_spec, - evm_config, + &evm_config, ) { Ok((new_header, bundle_state)) => { // clear all transactions from pool diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 64b69d1e5..4239fe449 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -137,7 +137,7 @@ where /// /// It does __not__ apply post-execution changes. fn execute_pre_and_transactions( - &mut self, + &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, u64), BlockExecutionError> diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 154aac2d7..d8e50b759 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -24,14 +24,17 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This does not automatically configure the EVM with [ConfigureEvmEnv] methods. 
It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; + fn evm<'a, DB: Database + 'a>( + &'a self, + db: DB, + ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// /// This will preserve any handler modifications fn evm_with_env<'a, DB: Database + 'a>( - &self, + &'a self, db: DB, env: EnvWithHandlerCfg, ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { @@ -48,7 +51,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// /// This will preserve any handler modifications fn evm_with_env_and_inspector<'a, DB, I>( - &self, + &'a self, db: DB, env: EnvWithHandlerCfg, inspector: I, @@ -68,7 +71,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Caution: This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is /// up to the caller to call an appropriate method to fill the transaction and block /// environment before executing any transactions using the provided EVM. - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + fn evm_with_inspector<'a, DB, I>(&'a self, db: DB, inspector: I) -> Evm<'a, I, DB> where DB: Database + 'a, I: GetInspector, diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 8c899df15..b6f0a191e 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -235,6 +235,11 @@ impl WithLaunchContext> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { + /// Returns a reference to the node builder's config. + pub fn config(&self) -> &NodeConfig { + self.builder.config() + } + /// Configures the types of the node. 
pub fn with_types(self) -> WithLaunchContext>> where diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 417423d54..01684e9c2 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -9,7 +9,7 @@ pub trait ExecutorBuilder: Send { type EVM: ConfigureEvm; // TODO(mattsse): integrate `Executor` - /// Creates the transaction pool. + /// Creates the EVM config. fn build_evm( self, ctx: &BuilderContext, diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index a77f42205..0a5e05780 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -137,7 +137,7 @@ where /// /// It does __not__ apply post-execution changes. fn execute_pre_and_transactions( - &mut self, + &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, u64), BlockExecutionError> diff --git a/crates/revm/src/factory.rs b/crates/revm/src/factory.rs index 61e43cc18..fdaae52c0 100644 --- a/crates/revm/src/factory.rs +++ b/crates/revm/src/factory.rs @@ -46,11 +46,8 @@ where sp: SP, ) -> Box + 'a> { let database_state = StateProviderDatabase::new(sp); - let mut evm = EVMProcessor::new_with_db( - self.chain_spec.clone(), - database_state, - self.evm_config.clone(), - ); + let mut evm = + EVMProcessor::new_with_db(self.chain_spec.clone(), database_state, &self.evm_config); if let Some(stack) = &self.stack { evm.set_stack(stack.clone()); } diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs index 01ec2efde..9fe51d059 100644 --- a/crates/revm/src/optimism/processor.rs +++ b/crates/revm/src/optimism/processor.rs @@ -242,10 +242,11 @@ mod tests { chain_spec: Arc, db: StateProviderTest, ) -> EVMProcessor<'a, TestEvmConfig> { + static CONFIG: std::sync::OnceLock = std::sync::OnceLock::new(); let mut executor = EVMProcessor::new_with_db( chain_spec, 
StateProviderDatabase::new(db), - TestEvmConfig::default(), + CONFIG.get_or_init(TestEvmConfig::default), ); executor.evm.context.evm.db.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); executor diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index c22272abc..487cec528 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -7,7 +7,7 @@ use revm::{ primitives::{CfgEnvWithHandlerCfg, ResultAndState}, Evm, State, }; -use std::{sync::Arc, time::Instant}; +use std::{marker::PhantomData, sync::Arc, time::Instant}; #[cfg(not(feature = "optimism"))] use tracing::{debug, trace}; @@ -59,7 +59,7 @@ pub struct EVMProcessor<'a, EvmConfig> { /// Execution stats pub(crate) stats: BlockExecutorStats, /// The type that is able to configure the EVM environment. - _evm_config: EvmConfig, + _phantom: PhantomData, } impl<'a, EvmConfig> EVMProcessor<'a, EvmConfig> @@ -75,7 +75,7 @@ where pub fn new_with_db( chain_spec: Arc, db: StateProviderDatabase, - evm_config: EvmConfig, + evm_config: &'a EvmConfig, ) -> Self { let state = State::builder() .with_database_boxed(Box::new(db)) @@ -89,7 +89,7 @@ where pub fn new_with_state( chain_spec: Arc, revm_state: StateDBBox<'a, ProviderError>, - evm_config: EvmConfig, + evm_config: &'a EvmConfig, ) -> Self { let stack = InspectorStack::new(InspectorStackConfig::default()); let evm = evm_config.evm_with_inspector(revm_state, stack); @@ -98,7 +98,7 @@ where evm, batch_record: BlockBatchRecord::default(), stats: BlockExecutorStats::default(), - _evm_config: evm_config, + _phantom: PhantomData, } } @@ -507,11 +507,9 @@ mod tests { ); // execute invalid header (no parent beacon block root) - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); // attempt to execute a block without 
parent beacon block root, expect err let err = executor @@ -599,11 +597,9 @@ mod tests { .build(), ); - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); executor.init_env(&header, U256::ZERO); // get the env @@ -648,11 +644,9 @@ mod tests { .build(), ); - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); // construct the header for block one let header = Header { @@ -702,11 +696,9 @@ mod tests { let mut header = chain_spec.genesis_header(); - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); executor.init_env(&header, U256::ZERO); // attempt to execute the genesis block with non-zero parent beacon block root, expect err @@ -781,11 +773,9 @@ mod tests { ); // execute header - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); executor.init_env(&header, U256::ZERO); // ensure that the env is configured with a base fee @@ -843,11 +833,9 @@ mod tests { let chain_id = chain_spec.chain.id(); // execute header - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); + let evm_config = 
TestEvmConfig::default(); + let mut executor = + EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); // Create a test transaction that gonna fail let transaction = TransactionSigned::from_transaction_and_signature( diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs index cd2b0c94d..db33aaf72 100644 --- a/examples/exex/rollup/src/main.rs +++ b/examples/exex/rollup/src/main.rs @@ -298,7 +298,8 @@ fn execute_block( ) .with_bundle_update() .build(); - let mut evm = EthEvmConfig::default().evm(state); + let evm_config = EthEvmConfig::default(); + let mut evm = evm_config.evm(state); // Set state clear flag. evm.db_mut().set_state_clear_flag( From 0938504f4a265eec74e0b833ab5c41d66649bdee Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 1 May 2024 19:32:25 +0200 Subject: [PATCH 151/250] chore: reduce number of Evm monomorphizations (#8030) --- crates/payload/builder/src/database.rs | 12 ++- crates/payload/ethereum/src/lib.rs | 81 ++++++++++++++------- crates/payload/optimism/src/builder.rs | 67 +++++++++++------ crates/rpc/rpc/src/debug.rs | 36 +++------ crates/rpc/rpc/src/eth/api/call.rs | 4 +- crates/rpc/rpc/src/eth/api/pending_block.rs | 5 +- crates/rpc/rpc/src/eth/api/transactions.rs | 31 +++----- crates/rpc/rpc/src/eth/revm_utils.rs | 7 +- crates/rpc/rpc/src/trace.rs | 2 +- 9 files changed, 139 insertions(+), 106 deletions(-) diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index 5b5239fdd..ac36de98c 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -61,10 +61,13 @@ impl CachedReads { } } +/// A [Database] that caches reads inside [CachedReads]. #[derive(Debug)] -struct CachedReadsDbMut<'a, DB> { - cached: &'a mut CachedReads, - db: DB, +pub struct CachedReadsDbMut<'a, DB> { + /// The cache of reads. + pub cached: &'a mut CachedReads, + /// The underlying database. 
+ pub db: DB, } impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { @@ -126,7 +129,8 @@ impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { /// `revm::db::State` for repeated payload build jobs. #[derive(Debug)] pub struct CachedReadsDBRef<'a, DB> { - inner: RefCell>, + /// The inner cache reads db mut. + pub inner: RefCell>, } impl<'a, DB: DatabaseRef> DatabaseRef for CachedReadsDBRef<'a, DB> { diff --git a/crates/payload/ethereum/src/lib.rs b/crates/payload/ethereum/src/lib.rs index f1c0a215b..e34287f76 100644 --- a/crates/payload/ethereum/src/lib.rs +++ b/crates/payload/ethereum/src/lib.rs @@ -73,36 +73,54 @@ where debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload"); let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to get state for empty payload"); - err - })?; + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to get state for empty payload" + ); + err + })?; let mut db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new(&state))) + .with_database(StateProviderDatabase::new(state)) .with_bundle_update() .build(); let base_fee = initialized_block_env.basefee.to::(); let block_number = initialized_block_env.number.to::(); - let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); + let block_gas_limit = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( - &mut db, - &chain_spec, - block_number, - &initialized_cfg, - &initialized_block_env, - &attributes, - ).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to apply beacon root contract call for empty payload"); - err - })?; - - let WithdrawalsOutcome { 
withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals.clone()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload"); - err - })?; + &mut db, + &chain_spec, + block_number, + &initialized_cfg, + &initialized_block_env, + &attributes, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + err + })?; + + let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( + &mut db, + &chain_spec, + attributes.timestamp, + attributes.withdrawals.clone(), + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to commit withdrawals for empty payload" + ); + err + })?; // merge all transitions into bundle state, this would apply the withdrawal balance // changes and 4788 contract call @@ -110,10 +128,14 @@ where // calculate the state root let bundle_state = db.take_bundle(); - let state_root = state.state_root(&bundle_state).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload"); - err - })?; + let state_root = db.database.state_root(&bundle_state).map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to calculate state root for empty payload" + ); + err + })?; let mut excess_blob_gas = None; let mut blob_gas_used = None; @@ -178,9 +200,9 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let state_provider = client.state_by_block_hash(config.parent_block.hash())?; - let state = StateProviderDatabase::new(&state_provider); + let state = StateProviderDatabase::new(state_provider); let mut db = - 
State::builder().with_database_ref(cached_reads.as_db(&state)).with_bundle_update().build(); + State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); let extra_data = config.extra_data(); let PayloadConfig { initialized_block_env, @@ -349,7 +371,10 @@ where let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let state_root = state_provider.state_root(bundle.state())?; + let state_root = { + let state_provider = db.database.0.inner.borrow_mut(); + state_provider.db.state_root(bundle.state())? + }; // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); diff --git a/crates/payload/optimism/src/builder.rs b/crates/payload/optimism/src/builder.rs index 7d8efa689..8e8bfb8f0 100644 --- a/crates/payload/optimism/src/builder.rs +++ b/crates/payload/optimism/src/builder.rs @@ -123,7 +123,7 @@ where err })?; let mut db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new(&state))) + .with_database(StateProviderDatabase::new(state)) .with_bundle_update() .build(); @@ -133,22 +133,36 @@ where // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( - &mut db, - &chain_spec, - block_number, - &initialized_cfg, - &initialized_block_env, - &attributes, - ).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to apply beacon root contract call for empty payload"); - err - })?; + &mut db, + &chain_spec, + block_number, + &initialized_cfg, + &initialized_block_env, + &attributes, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + err + })?; - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.payload_attributes.timestamp, 
attributes.payload_attributes.withdrawals.clone()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload"); - err - })?; + let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( + &mut db, + &chain_spec, + attributes.payload_attributes.timestamp, + attributes.payload_attributes.withdrawals.clone(), + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to commit withdrawals for empty payload" + ); + err + })?; // merge all transitions into bundle state, this would apply the withdrawal balance // changes and 4788 contract call @@ -156,10 +170,14 @@ where // calculate the state root let bundle_state = db.take_bundle(); - let state_root = state.state_root(&bundle_state).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload"); - err - })?; + let state_root = db.database.state_root(&bundle_state).map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to calculate state root for empty payload" + ); + err + })?; let mut excess_blob_gas = None; let mut blob_gas_used = None; @@ -236,9 +254,9 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let state_provider = client.state_by_block_hash(config.parent_block.hash())?; - let state = StateProviderDatabase::new(&state_provider); + let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(&state)).with_bundle_update().build(); + State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); let extra_data = config.extra_data(); let PayloadConfig { initialized_block_env, @@ -510,7 +528,10 @@ where let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range"); // 
calculate the state root - let state_root = state_provider.state_root(bundle.state())?; + let state_root = { + let state_provider = db.database.0.inner.borrow_mut(); + state_provider.db.state_root(bundle.state())? + }; // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index e47ccc466..b21adf520 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -323,14 +323,11 @@ where self.inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env, - &mut inspector, - )?; + let (res, _) = + this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector .into_geth_builder() - .geth_prestate_traces(&res, prestate_config, &db)?; + .geth_prestate_traces(&res, prestate_config, db)?; Ok(frame) }) .await?; @@ -348,12 +345,9 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env, - &mut inspector, - )?; - let frame = inspector.try_into_mux_frame(&res, &db)?; + let (res, _) = + this.eth_api().inspect(&mut *db, env, &mut inspector)?; + let frame = inspector.try_into_mux_frame(&res, db)?; Ok(frame.into()) }) .await?; @@ -370,12 +364,9 @@ where .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { let mut inspector = JsInspector::new(code, config)?; - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env.clone(), - &mut inspector, - )?; - Ok(inspector.json_result(res, &env, &db)?) + let (res, _) = + this.eth_api().inspect(&mut *db, env.clone(), &mut inspector)?; + Ok(inspector.json_result(res, &env, db)?) 
}) .await?; @@ -564,8 +555,7 @@ where let mut inspector = TracingInspector::new( TracingInspectorConfig::from_geth_prestate_config(&prestate_config), ); - let (res, _, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector.into_geth_builder().geth_prestate_traces( &res, @@ -585,8 +575,7 @@ where let mut inspector = MuxInspector::try_from_config(mux_config)?; - let (res, _, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector.try_into_mux_frame(&res, db)?; return Ok((frame.into(), res.state)) } @@ -598,8 +587,7 @@ where config, transaction_context.unwrap_or_default(), )?; - let (res, env, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let state = res.state.clone(); let result = inspector.json_result(res, &env, db)?; diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 191406f96..8ef2af2f5 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -451,14 +451,14 @@ where &self, env_gas_limit: U256, mut env: EnvWithHandlerCfg, - mut db: &mut CacheDB>, + db: &mut CacheDB>, ) -> EthApiError where S: StateProvider, { let req_gas_limit = env.tx.gas_limit; env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); - let (res, _) = match self.transact(&mut db, env) { + let (res, _) = match self.transact(db, env) { Ok(res) => res, Err(err) => return err, }; diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index aa18bf7ec..dbb148981 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -52,8 +52,8 @@ impl PendingBlockEnv { let parent_hash = origin.build_target_hash(); let 
state_provider = client.history_by_block_hash(parent_hash)?; - let state = StateProviderDatabase::new(&state_provider); - let mut db = State::builder().with_database(Box::new(state)).with_bundle_update().build(); + let state = StateProviderDatabase::new(state_provider); + let mut db = State::builder().with_database(state).with_bundle_update().build(); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; @@ -230,6 +230,7 @@ impl PendingBlockEnv { let logs_bloom = bundle.block_logs_bloom(block_number).expect("Block is present"); // calculate the state root + let state_provider = &db.database; let state_root = state_provider.state_root(bundle.state())?; // create the block header diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 3e582821b..15e2b6f56 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -287,7 +287,7 @@ pub trait EthTransactions: Send + Sync { f: F, ) -> EthResult where - F: FnOnce(StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, + F: FnOnce(&mut StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, R: Send + 'static; /// Executes the call request at the given [BlockId]. @@ -308,7 +308,7 @@ pub trait EthTransactions: Send + Sync { inspector: I, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> where - I: Inspector + Send + 'static; + I: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static; /// Executes the transaction on top of the given [BlockId] with a tracer configured by the /// config. 
@@ -571,10 +571,7 @@ where ::Error: Into, I: GetInspector, { - let mut evm = self.inner.evm_config.evm_with_env_and_inspector(db, env, inspector); - let res = evm.transact()?; - let (_, env) = evm.into_db_and_env_with_handler_cfg(); - Ok((res, env)) + self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) } fn inspect_and_return_db( @@ -1066,7 +1063,7 @@ where f: F, ) -> EthResult where - F: FnOnce(StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, + F: FnOnce(&mut StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, R: Send + 'static, { let (cfg, block_env, at) = self.evm_env_at(at).await?; @@ -1085,7 +1082,7 @@ where &mut db, overrides, )?; - f(db, env) + f(&mut db, env) }) .await .map_err(|_| EthApiError::InternalBlockingTaskError)? @@ -1098,10 +1095,7 @@ where overrides: EvmOverrides, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> { let this = self.clone(); - self.spawn_with_call_at(request, at, overrides, move |mut db, env| { - this.transact(&mut db, env) - }) - .await + self.spawn_with_call_at(request, at, overrides, move |db, env| this.transact(db, env)).await } async fn spawn_inspect_call_at( @@ -1112,7 +1106,7 @@ where inspector: I, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> where - I: Inspector + Send + 'static, + I: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, { let this = self.clone(); self.spawn_with_call_at(request, at, overrides, move |db, env| { @@ -1133,11 +1127,9 @@ where { let this = self.clone(); self.with_state_at_block(at, |state| { - let db = CacheDB::new(StateProviderDatabase::new(state)); - + let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(db, env, &mut inspector)?; - + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res) }) } @@ -1155,10 +1147,9 @@ where { let this = self.clone(); self.spawn_with_state_at_block(at, move |state| { - let db = 
CacheDB::new(StateProviderDatabase::new(state)); + let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _, db) = this.inspect_and_return_db(db, env, &mut inspector)?; - + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res, db) }) .await diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index c80aee99d..c2855163b 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -278,7 +278,10 @@ pub(crate) fn create_txn_env( } /// Caps the configured [TxEnv] `gas_limit` with the allowance of the caller. -pub(crate) fn cap_tx_gas_limit_with_caller_allowance(db: DB, env: &mut TxEnv) -> EthResult<()> +pub(crate) fn cap_tx_gas_limit_with_caller_allowance( + db: &mut DB, + env: &mut TxEnv, +) -> EthResult<()> where DB: Database, EthApiError: From<::Error>, @@ -296,7 +299,7 @@ where /// /// Returns an error if the caller has insufficient funds. /// Caution: This assumes non-zero `env.gas_price`. Otherwise, zero allowance will be returned. 
-pub(crate) fn caller_gas_allowance(mut db: DB, env: &TxEnv) -> EthResult +pub(crate) fn caller_gas_allowance(db: &mut DB, env: &TxEnv) -> EthResult where DB: Database, EthApiError: From<::Error>, diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 047919036..5ee089a91 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -86,7 +86,7 @@ where let this = self.clone(); self.eth_api() .spawn_with_call_at(trace_request.call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let trace_res = inspector.into_parity_builder().into_trace_results_with_state( &res, &trace_request.trace_types, From 1c1cbe92317eaf89d3ff32bc6ccf896ef6e24de9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 1 May 2024 18:57:01 +0100 Subject: [PATCH 152/250] feat(pool): add `chain_id` to transaction mocks (#8031) --- .../transaction-pool/src/test_utils/mock.rs | 267 ++++++++++-------- 1 file changed, 151 insertions(+), 116 deletions(-) diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 8e265e7ba..108e6073b 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -13,8 +13,9 @@ use rand::{ }; use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, + eip4844::kzg_to_versioned_hash, transaction::TryFromRecoveredTransactionError, - AccessList, Address, BlobTransactionSidecar, Bytes, FromRecoveredPooledTransaction, + AccessList, Address, BlobTransactionSidecar, Bytes, ChainId, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, B256, 
EIP1559_TX_TYPE_ID, @@ -94,6 +95,8 @@ macro_rules! make_setters_getters { pub enum MockTransaction { /// Legacy transaction type. Legacy { + /// The chain id of the transaction. + chain_id: Option, /// The hash of the transaction. hash: B256, /// The sender's address. @@ -113,8 +116,35 @@ pub enum MockTransaction { /// The size of the transaction, returned in the implementation of [PoolTransaction]. size: usize, }, + /// EIP-2930 transaction type. + Eip2930 { + /// The chain id of the transaction. + chain_id: ChainId, + /// The hash of the transaction. + hash: B256, + /// The sender's address. + sender: Address, + /// The transaction nonce. + nonce: u64, + /// The transaction's destination. + to: TxKind, + /// The gas limit for the transaction. + gas_limit: u64, + /// The transaction input data. + input: Bytes, + /// The value of the transaction. + value: U256, + /// The gas price for the transaction. + gas_price: u128, + /// The access list associated with the transaction. + access_list: AccessList, + /// The size of the transaction, returned in the implementation of [PoolTransaction]. + size: usize, + }, /// EIP-1559 transaction type. Eip1559 { + /// The chain id of the transaction. + chain_id: ChainId, /// The hash of the transaction. hash: B256, /// The sender's address. @@ -132,7 +162,7 @@ pub enum MockTransaction { /// The value of the transaction. value: U256, /// The access list associated with the transaction. - accesslist: AccessList, + access_list: AccessList, /// The transaction input data. input: Bytes, /// The size of the transaction, returned in the implementation of [PoolTransaction]. @@ -140,6 +170,8 @@ pub enum MockTransaction { }, /// EIP-4844 transaction type. Eip4844 { + /// The chain id of the transaction. + chain_id: ChainId, /// The hash of the transaction. hash: B256, /// The sender's address. @@ -159,7 +191,7 @@ pub enum MockTransaction { /// The value of the transaction. value: U256, /// The access list associated with the transaction. 
- accesslist: AccessList, + access_list: AccessList, /// The transaction input data. input: Bytes, /// The sidecar information for the transaction. @@ -167,29 +199,6 @@ pub enum MockTransaction { /// The size of the transaction, returned in the implementation of [PoolTransaction]. size: usize, }, - /// EIP-2930 transaction type. - Eip2930 { - /// The hash of the transaction. - hash: B256, - /// The sender's address. - sender: Address, - /// The transaction nonce. - nonce: u64, - /// The transaction's destination. - to: TxKind, - /// The gas limit for the transaction. - gas_limit: u64, - /// The transaction input data. - input: Bytes, - /// The value of the transaction. - value: U256, - /// The gas price for the transaction. - gas_price: u128, - /// The access list associated with the transaction. - accesslist: AccessList, - /// The size of the transaction, returned in the implementation of [PoolTransaction]. - size: usize, - }, } // === impl MockTransaction === @@ -208,6 +217,7 @@ impl MockTransaction { /// Returns a new legacy transaction with random address and hash and empty values pub fn legacy() -> Self { MockTransaction::Legacy { + chain_id: Some(1), hash: B256::random(), sender: Address::random(), nonce: 0, @@ -220,9 +230,27 @@ impl MockTransaction { } } + /// Returns a new EIP2930 transaction with random address and hash and empty values + pub fn eip2930() -> Self { + MockTransaction::Eip2930 { + chain_id: 1, + hash: B256::random(), + sender: Address::random(), + nonce: 0, + to: Address::random().into(), + gas_limit: 0, + input: Bytes::new(), + value: Default::default(), + gas_price: 0, + access_list: Default::default(), + size: Default::default(), + } + } + /// Returns a new EIP1559 transaction with random address and hash and empty values pub fn eip1559() -> Self { MockTransaction::Eip1559 { + chain_id: 1, hash: B256::random(), sender: Address::random(), nonce: 0, @@ -232,7 +260,7 @@ impl MockTransaction { to: Address::random().into(), value: 
Default::default(), input: Bytes::new(), - accesslist: Default::default(), + access_list: Default::default(), size: Default::default(), } } @@ -240,6 +268,7 @@ impl MockTransaction { /// Returns a new EIP4844 transaction with random address and hash and empty values pub fn eip4844() -> Self { MockTransaction::Eip4844 { + chain_id: 1, hash: B256::random(), sender: Address::random(), nonce: 0, @@ -250,7 +279,7 @@ impl MockTransaction { to: Address::random().into(), value: Default::default(), input: Bytes::new(), - accesslist: Default::default(), + access_list: Default::default(), sidecar: Default::default(), size: Default::default(), } @@ -266,22 +295,6 @@ impl MockTransaction { transaction } - /// Returns a new EIP2930 transaction with random address and hash and empty values - pub fn eip2930() -> Self { - MockTransaction::Eip2930 { - hash: B256::random(), - sender: Address::random(), - nonce: 0, - to: Address::random().into(), - gas_limit: 0, - input: Bytes::new(), - value: Default::default(), - gas_price: 0, - accesslist: Default::default(), - size: Default::default(), - } - } - /// Creates a new transaction with the given [TxType]. /// /// See the default constructors for each of the transaction types: @@ -372,9 +385,9 @@ impl MockTransaction { pub fn set_accesslist(&mut self, list: AccessList) -> &mut Self { match self { MockTransaction::Legacy { .. } => {} - MockTransaction::Eip1559 { accesslist, .. } | - MockTransaction::Eip4844 { accesslist, .. } | - MockTransaction::Eip2930 { accesslist, .. } => { + MockTransaction::Eip1559 { access_list: accesslist, .. } | + MockTransaction::Eip4844 { access_list: accesslist, .. } | + MockTransaction::Eip2930 { access_list: accesslist, .. } => { *accesslist = list; } } @@ -611,9 +624,9 @@ impl PoolTransaction for MockTransaction { fn access_list(&self) -> Option<&AccessList> { match self { MockTransaction::Legacy { .. } => None, - MockTransaction::Eip1559 { accesslist, .. } | - MockTransaction::Eip4844 { accesslist, .. 
} | - MockTransaction::Eip2930 { accesslist, .. } => Some(accesslist), + MockTransaction::Eip1559 { access_list: accesslist, .. } | + MockTransaction::Eip4844 { access_list: accesslist, .. } | + MockTransaction::Eip2930 { access_list: accesslist, .. } => Some(accesslist), } } @@ -735,7 +748,7 @@ impl TryFromRecoveredTransaction for MockTransaction { #[allow(unreachable_patterns)] match transaction.transaction { Transaction::Legacy(TxLegacy { - chain_id: _, + chain_id, nonce, gas_price, gas_limit, @@ -743,6 +756,7 @@ impl TryFromRecoveredTransaction for MockTransaction { value, input, }) => Ok(MockTransaction::Legacy { + chain_id, hash, sender, nonce, @@ -753,31 +767,30 @@ impl TryFromRecoveredTransaction for MockTransaction { input, size, }), - Transaction::Eip1559(TxEip1559 { - chain_id: _, + Transaction::Eip2930(TxEip2930 { + chain_id, nonce, + gas_price, gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, to, value, input, access_list, - }) => Ok(MockTransaction::Eip1559 { + }) => Ok(MockTransaction::Eip2930 { + chain_id, hash, sender, nonce, - max_fee_per_gas, - max_priority_fee_per_gas, + gas_price, gas_limit, to, value, input, - accesslist: access_list, + access_list, size, }), - Transaction::Eip4844(TxEip4844 { - chain_id: _, + Transaction::Eip1559(TxEip1559 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -786,42 +799,46 @@ impl TryFromRecoveredTransaction for MockTransaction { value, input, access_list, - blob_versioned_hashes: _, - max_fee_per_blob_gas, - }) => Ok(MockTransaction::Eip4844 { + }) => Ok(MockTransaction::Eip1559 { + chain_id, hash, sender, nonce, max_fee_per_gas, max_priority_fee_per_gas, - max_fee_per_blob_gas, gas_limit, to, value, input, - accesslist: access_list, - sidecar: BlobTransactionSidecar::default(), + access_list, size, }), - Transaction::Eip2930(TxEip2930 { - chain_id: _, + Transaction::Eip4844(TxEip4844 { + chain_id, nonce, - gas_price, gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, to, value, input, 
access_list, - }) => Ok(MockTransaction::Eip2930 { + blob_versioned_hashes: _, + max_fee_per_blob_gas, + }) => Ok(MockTransaction::Eip4844 { + chain_id, hash, sender, nonce, - gas_price, + max_fee_per_gas, + max_priority_fee_per_gas, + max_fee_per_blob_gas, gas_limit, to, value, input, - accesslist: access_list, + access_list, + sidecar: BlobTransactionSidecar::default(), size, }), _ => unreachable!("Invalid transaction type"), @@ -856,6 +873,7 @@ impl From for Transaction { fn from(mock: MockTransaction) -> Self { match mock { MockTransaction::Legacy { + chain_id, hash: _, sender: _, nonce, @@ -865,16 +883,31 @@ impl From for Transaction { value, input, size: _, - } => Self::Legacy(TxLegacy { - chain_id: Some(1), + } => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), + MockTransaction::Eip2930 { + chain_id, + hash: _, + sender: _, + nonce, + to, + gas_limit, + input, + value, + gas_price, + access_list, + size: _, + } => Self::Eip2930(TxEip2930 { + chain_id, nonce, gas_price, gas_limit, to, value, + access_list, input, }), MockTransaction::Eip1559 { + chain_id, hash: _, sender: _, nonce, @@ -883,22 +916,23 @@ impl From for Transaction { gas_limit, to, value, - accesslist, + access_list, input, size: _, } => Self::Eip1559(TxEip1559 { - chain_id: 1, + chain_id, nonce, gas_limit, max_fee_per_gas, max_priority_fee_per_gas, to, value, - access_list: accesslist, + access_list, input, }), MockTransaction::Eip4844 { - hash, + chain_id, + hash: _, sender: _, nonce, max_fee_per_gas, @@ -907,44 +941,27 @@ impl From for Transaction { gas_limit, to, value, - accesslist, + access_list, input, - sidecar: _, + sidecar, size: _, } => Self::Eip4844(TxEip4844 { - chain_id: 1, + chain_id, nonce, gas_limit, max_fee_per_gas, max_priority_fee_per_gas, to, value, - access_list: accesslist, - blob_versioned_hashes: vec![hash], + access_list, + blob_versioned_hashes: sidecar + .commitments + .into_iter() + .map(|commitment| 
kzg_to_versioned_hash((*commitment).into())) + .collect(), max_fee_per_blob_gas, input, }), - MockTransaction::Eip2930 { - hash: _, - sender: _, - nonce, - to, - gas_limit, - input, - value, - gas_price, - accesslist, - size: _, - } => Self::Eip2930(TxEip2930 { - chain_id: 1, - nonce, - gas_price, - gas_limit, - to, - value, - access_list: accesslist, - input, - }), } } } @@ -958,23 +975,37 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { any::<(Transaction, Address, B256)>() .prop_map(|(tx, sender, tx_hash)| match &tx { Transaction::Legacy(TxLegacy { + chain_id, nonce, gas_price, gas_limit, to, value, input, - .. - }) | + }) => MockTransaction::Legacy { + chain_id: *chain_id, + sender, + hash: tx_hash, + nonce: *nonce, + gas_price: *gas_price, + gas_limit: *gas_limit, + to: *to, + value: *value, + input: input.clone(), + size: tx.size(), + }, + Transaction::Eip2930(TxEip2930 { + chain_id, nonce, gas_price, gas_limit, to, value, + access_list, input, - .. - }) => MockTransaction::Legacy { + }) => MockTransaction::Eip2930 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -982,10 +1013,12 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), + input: input.clone(), + access_list: access_list.clone(), size: tx.size(), }, Transaction::Eip1559(TxEip1559 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -994,8 +1027,8 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { value, input, access_list, - .. 
}) => MockTransaction::Eip1559 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -1004,11 +1037,12 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), - accesslist: (*access_list).clone(), + input: input.clone(), + access_list: access_list.clone(), size: tx.size(), }, Transaction::Eip4844(TxEip4844 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -1018,8 +1052,9 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { input, max_fee_per_blob_gas, access_list, - .. + blob_versioned_hashes: _, }) => MockTransaction::Eip4844 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -1029,8 +1064,8 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), - accesslist: (*access_list).clone(), + input: input.clone(), + access_list: access_list.clone(), // only generate a sidecar if it is a 4844 tx - also for the sake of // performance just use a default sidecar sidecar: BlobTransactionSidecar::default(), From 9ae9af484d4aa3e5f8393654ec0ab110bd940afd Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 1 May 2024 19:44:55 +0100 Subject: [PATCH 153/250] feat(pool): make mock transaction validator eth-compatible (#8034) --- crates/transaction-pool/src/noop.rs | 17 +++---- .../transaction-pool/src/test_utils/mock.rs | 48 ++++++++++++++++--- crates/transaction-pool/src/traits.rs | 32 ++++++++----- crates/transaction-pool/src/validate/eth.rs | 22 +++------ 4 files changed, 77 insertions(+), 42 deletions(-) diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 5f2a11048..b550a2bc1 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -11,10 +11,10 @@ use crate::{ TransactionListenerKind, }, validate::ValidTransaction, - AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, 
EthPooledTransaction, - NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PooledTransactionsElement, - PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, - TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, + EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, + PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, + TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use reth_eth_wire::HandleMempoolData; use reth_primitives::{Address, BlobTransactionSidecar, TxHash, U256}; @@ -252,20 +252,21 @@ pub struct MockTransactionValidator { _marker: PhantomData, } -impl TransactionValidator for MockTransactionValidator { +impl TransactionValidator for MockTransactionValidator { type Transaction = T; async fn validate_transaction( &self, origin: TransactionOrigin, - transaction: Self::Transaction, + mut transaction: Self::Transaction, ) -> TransactionValidationOutcome { + let maybe_sidecar = transaction.take_blob().maybe_sidecar().cloned(); // we return `balance: U256::MAX` to simulate a valid transaction which will never go into // overdraft TransactionValidationOutcome::Valid { balance: U256::MAX, state_nonce: 0, - transaction: ValidTransaction::Valid(transaction), + transaction: ValidTransaction::new(transaction, maybe_sidecar), propagate: match origin { TransactionOrigin::External => true, TransactionOrigin::Local => self.propagate_local, @@ -285,7 +286,7 @@ impl MockTransactionValidator { impl Default for MockTransactionValidator { fn default() -> Self { - MockTransactionValidator { propagate_local: true, _marker: Default::default() } + Self { propagate_local: true, _marker: Default::default() } } } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs 
index 108e6073b..bcacff2da 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -4,7 +4,8 @@ use crate::{ identifier::{SenderIdentifiers, TransactionId}, pool::txpool::TxPool, traits::TransactionOrigin, - CoinbaseTipOrdering, PoolTransaction, ValidPoolTransaction, + CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction, + ValidPoolTransaction, }; use paste::paste; use rand::{ @@ -15,11 +16,11 @@ use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, eip4844::kzg_to_versioned_hash, transaction::TryFromRecoveredTransactionError, - AccessList, Address, BlobTransactionSidecar, Bytes, ChainId, FromRecoveredPooledTransaction, - IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, - TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip1559, - TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID, - EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, + AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, Bytes, ChainId, + FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, + Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, + TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, + B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -730,7 +731,40 @@ impl PoolTransaction for MockTransaction { /// Returns the chain ID associated with the transaction. fn chain_id(&self) -> Option { - Some(1) + match self { + MockTransaction::Legacy { chain_id, .. } => *chain_id, + + MockTransaction::Eip1559 { chain_id, .. } | + MockTransaction::Eip4844 { chain_id, .. } | + MockTransaction::Eip2930 { chain_id, .. 
} => Some(*chain_id), + } + } +} + +impl EthPoolTransaction for MockTransaction { + fn take_blob(&mut self) -> EthBlobTransactionSidecar { + match self { + Self::Eip4844 { sidecar, .. } => EthBlobTransactionSidecar::Present(sidecar.clone()), + _ => EthBlobTransactionSidecar::None, + } + } + + fn blob_count(&self) -> usize { + match self { + Self::Eip4844 { sidecar, .. } => sidecar.blobs.len(), + _ => 0, + } + } + + fn validate_blob( + &self, + _blob: &BlobTransactionSidecar, + _settings: &revm::primitives::KzgSettings, + ) -> Result<(), reth_primitives::BlobTransactionValidationError> { + match &self { + Self::Eip4844 { .. } => Ok(()), + _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), + } } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 79b9af698..ca91b00da 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -14,8 +14,8 @@ use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, - SealedBlock, Transaction, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip4844, - TxHash, TxKind, B256, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, + SealedBlock, Transaction, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxHash, + TxKind, B256, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -856,12 +856,7 @@ pub trait EthPoolTransaction: PoolTransaction { fn take_blob(&mut self) -> EthBlobTransactionSidecar; /// Returns the number of blobs this transaction has. 
- fn blob_count(&self) -> usize { - self.as_eip4844().map(|tx| tx.blob_versioned_hashes.len()).unwrap_or_default() - } - - /// Returns the transaction as EIP-4844 transaction if it is one. - fn as_eip4844(&self) -> Option<&TxEip4844>; + fn blob_count(&self) -> usize; /// Validates the blob sidecar of the transaction with the given settings. fn validate_blob( @@ -908,6 +903,16 @@ pub enum EthBlobTransactionSidecar { Present(BlobTransactionSidecar), } +impl EthBlobTransactionSidecar { + /// Returns the blob sidecar if it is present + pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { + match self { + EthBlobTransactionSidecar::Present(sidecar) => Some(sidecar), + _ => None, + } + } +} + impl EthPooledTransaction { /// Create new instance of [Self]. /// @@ -1096,8 +1101,11 @@ impl EthPoolTransaction for EthPooledTransaction { } } - fn as_eip4844(&self) -> Option<&TxEip4844> { - self.transaction.as_eip4844() + fn blob_count(&self) -> usize { + match &self.transaction.transaction { + Transaction::Eip4844(tx) => tx.blob_versioned_hashes.len(), + _ => 0, + } } fn validate_blob( @@ -1125,13 +1133,13 @@ impl TryFromRecoveredTransaction for EthPooledTransaction { } EIP4844_TX_TYPE_ID => { // doesn't have a blob sidecar - return Err(TryFromRecoveredTransactionError::BlobSidecarMissing); + return Err(TryFromRecoveredTransactionError::BlobSidecarMissing) } unsupported => { // unsupported transaction type return Err(TryFromRecoveredTransactionError::UnsupportedTransactionType( unsupported, - )); + )) } }; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index a07e6fc97..b31a3af48 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -361,25 +361,17 @@ where } } EthBlobTransactionSidecar::Present(blob) => { - if let Some(eip4844) = transaction.as_eip4844() { - // validate the blob - if let Err(err) = eip4844.validate_blob(&blob, &self.kzg_settings) { - 
return TransactionValidationOutcome::Invalid( - transaction, - InvalidPoolTransactionError::Eip4844( - Eip4844PoolTransactionError::InvalidEip4844Blob(err), - ), - ) - } - // store the extracted blob - maybe_blob_sidecar = Some(blob); - } else { - // this should not happen + // validate the blob + if let Err(err) = transaction.validate_blob(&blob, &self.kzg_settings) { return TransactionValidationOutcome::Invalid( transaction, - InvalidTransactionError::TxTypeNotSupported.into(), + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::InvalidEip4844Blob(err), + ), ) } + // store the extracted blob + maybe_blob_sidecar = Some(blob); } } } From 9d2ca45c30a1cf13c193abe1e1ae172d0381d9c6 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 1 May 2024 21:23:21 +0200 Subject: [PATCH 154/250] chore(engine): flatten fcu processing (#8027) --- crates/consensus/beacon/src/engine/mod.rs | 149 +++++++++------------- 1 file changed, 59 insertions(+), 90 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 428b95c0b..338a37f02 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -45,7 +45,6 @@ use std::{ use tokio::sync::{ mpsc, mpsc::{UnboundedReceiver, UnboundedSender}, - oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; @@ -381,40 +380,6 @@ where None } - /// Called to resolve chain forks and ensure that the Execution layer is working with the latest - /// valid chain. - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). - /// - /// Returns an error if an internal error occurred like a database error. 
- fn forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option, - ) -> Result { - trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - - // Pre-validate forkchoice state update and return if it's invalid or - // cannot be processed at the moment. - if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { - return Ok(on_updated) - } - - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(state.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - - let status = self.on_forkchoice_updated_make_canonical_result( - state, - attrs, - make_canonical_result, - elapsed, - )?; - trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); - Ok(status) - } - /// Process the result of attempting to make forkchoice state head hash canonical. /// /// # Returns @@ -519,56 +484,54 @@ where false } - /// Invoked when we receive a new forkchoice update message. + /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree + /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid + /// chain. + /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). /// - /// Returns `true` if the engine now reached its maximum block number, See - /// [EngineSyncController::has_reached_max_block]. + /// Returns an error if an internal error occurred like a database error. 
fn on_forkchoice_updated( &mut self, state: ForkchoiceState, attrs: Option, - tx: oneshot::Sender>, - ) -> Result { + ) -> Result { self.metrics.forkchoice_updated_messages.increment(1); self.blockchain.on_forkchoice_update_received(&state); + trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - let on_updated = match self.forkchoice_updated(state, attrs) { - Ok(response) => response, - Err(error) => { - if error.is_fatal() { - // FCU resulted in a fatal error from which we can't recover - let err = error.clone(); - let _ = tx.send(Err(RethError::Canonical(error))); - return Err(err) - } - let _ = tx.send(Err(RethError::Canonical(error))); - return Ok(OnForkchoiceUpdateOutcome::Processed) - } - }; - - let fcu_status = on_updated.forkchoice_status(); + // Pre-validate forkchoice state update and return if it's invalid or + // cannot be processed at the moment. + if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { + return Ok(on_updated) + } - // update the forkchoice state tracker - self.forkchoice_state_tracker.set_latest(state, fcu_status); + let start = Instant::now(); + let make_canonical_result = self.blockchain.make_canonical(state.head_block_hash); + let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - // send the response to the CL ASAP - let _ = tx.send(Ok(on_updated)); + let status = self.on_forkchoice_updated_make_canonical_result( + state, + attrs, + make_canonical_result, + elapsed, + )?; + trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); + Ok(status) + } - match fcu_status { + /// Called after the forkchoice update status has been resolved. + /// Depending on the outcome, the method updates the sync state and notifies the listeners + /// about new processed FCU. 
+ fn on_forkchoice_updated_status(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { + match status { ForkchoiceStatus::Invalid => {} ForkchoiceStatus::Valid => { // FCU head is valid, we're no longer syncing self.sync_state_updater.update_sync_state(SyncState::Idle); // node's fully synced, clear active download requests self.sync.clear_block_download_requests(); - - // check if we reached the maximum configured block - let tip_number = self.blockchain.canonical_tip().number; - if self.sync.has_reached_max_block(tip_number) { - // Terminate the sync early if it's reached the maximum user - // configured block. - return Ok(OnForkchoiceUpdateOutcome::ReachedMaxBlock) - } } ForkchoiceStatus::Syncing => { // we're syncing @@ -577,9 +540,7 @@ where } // notify listeners about new processed FCU - self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, fcu_status)); - - Ok(OnForkchoiceUpdateOutcome::Processed) + self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); } /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less @@ -966,7 +927,7 @@ where /// /// If the newest head is not invalid, then this will trigger a new pipeline run to sync the gap /// - /// See [Self::forkchoice_updated] and [BlockchainTreeEngine::make_canonical]. + /// See [Self::on_forkchoice_updated] and [BlockchainTreeEngine::make_canonical]. 
fn on_failed_canonical_forkchoice_update( &mut self, state: &ForkchoiceState, @@ -1758,17 +1719,34 @@ where if let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) { match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - match this.on_forkchoice_updated(state, payload_attrs, tx) { - Ok(OnForkchoiceUpdateOutcome::Processed) => {} - Ok(OnForkchoiceUpdateOutcome::ReachedMaxBlock) => { - // reached the max block, we can terminate the future - return Poll::Ready(Ok(())) + match this.on_forkchoice_updated(state, payload_attrs) { + Ok(on_updated) => { + let fcu_status = on_updated.forkchoice_status(); + // update the forkchoice state tracker + this.forkchoice_state_tracker.set_latest(state, fcu_status); + // send the response to the CL ASAP + let _ = tx.send(Ok(on_updated)); + + if fcu_status.is_valid() { + let tip_number = this.blockchain.canonical_tip().number; + if this.sync.has_reached_max_block(tip_number) { + // Terminate the sync early if it's reached the + // maximum user configured block. + return Poll::Ready(Ok(())) + } + } + + this.on_forkchoice_updated_status(state, fcu_status); } - Err(err) => { - // fatal error, we can terminate the future - return Poll::Ready(Err(RethError::Canonical(err).into())) + Err(error) => { + if error.is_fatal() { + // fatal error, we can terminate the future + let _ = tx.send(Err(RethError::Canonical(error.clone()))); + return Poll::Ready(Err(RethError::Canonical(error).into())) + } + let _ = tx.send(Err(RethError::Canonical(error))); } - } + }; } BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { this.metrics.new_payload_messages.increment(1); @@ -1828,15 +1806,6 @@ where } } -/// Represents all outcomes of an applied fork choice update. -#[derive(Debug)] -enum OnForkchoiceUpdateOutcome { - /// FCU was processed successfully. - Processed, - /// FCU was processed successfully and reached max block. 
- ReachedMaxBlock, -} - /// Represents outcomes of processing a sync event #[derive(Debug)] enum SyncEventOutcome { From f94ce6e7800df33a296c1b4be2db9ab812474b08 Mon Sep 17 00:00:00 2001 From: Daniel Ramirez Date: Wed, 1 May 2024 15:37:49 -0400 Subject: [PATCH 155/250] chore: cfg ImportOp behind optimism feature (#8033) --- bin/reth/src/cli/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index e7d278964..40e1f24be 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -1,12 +1,14 @@ //! CLI definition and entrypoint to executable +#[cfg(feature = "optimism")] +use crate::commands::import_op; use crate::{ args::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, import_op, init_cmd, init_state, + config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, node::{self, NoArgs}, p2p, recover, stage, test_vectors, }, @@ -148,6 +150,7 @@ impl Cli { Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), + #[cfg(feature = "optimism")] Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), @@ -186,6 +189,7 @@ pub enum Commands { #[command(name = "import")] Import(import::ImportCommand), /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. + #[cfg(feature = "optimism")] #[command(name = "import-op")] ImportOp(import_op::ImportOpCommand), /// Dumps genesis block JSON configuration to stdout. 
From 0aa7d4d05ed5565545002655ddc94083e057977e Mon Sep 17 00:00:00 2001 From: Seva Zhidkov Date: Wed, 1 May 2024 21:52:40 +0100 Subject: [PATCH 156/250] feat(rpc-builder): add tower layer for updating bearer token in auth client (#8010) --- crates/e2e-test-utils/src/engine_api.rs | 5 +- crates/rpc/rpc-builder/src/auth.rs | 41 ++++------ .../rpc/rpc/src/layers/auth_client_layer.rs | 79 +++++++++++++++++++ crates/rpc/rpc/src/layers/mod.rs | 3 + crates/rpc/rpc/src/lib.rs | 5 +- 5 files changed, 106 insertions(+), 27 deletions(-) create mode 100644 crates/rpc/rpc/src/layers/auth_client_layer.rs diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 9ede69e67..fecd9b8b7 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,5 +1,5 @@ use crate::traits::PayloadEnvelopeExt; -use jsonrpsee::http_client::HttpClient; +use jsonrpsee::http_client::{transport::HttpBackend, HttpClient}; use reth::{ api::{EngineTypes, PayloadBuilderAttributes}, providers::CanonStateNotificationStream, @@ -10,12 +10,13 @@ use reth::{ }; use reth_payload_builder::PayloadId; use reth_primitives::B256; +use reth_rpc::AuthClientService; use std::marker::PhantomData; /// Helper for engine api operations pub struct EngineApiTestContext { pub canonical_stream: CanonStateNotificationStream, - pub engine_api_client: HttpClient, + pub engine_api_client: HttpClient>, pub _marker: PhantomData, } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 72345aca6..186d61332 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -15,6 +15,7 @@ use jsonrpsee::{ }; pub use reth_ipc::server::Builder as IpcServerBuilder; +use jsonrpsee::http_client::transport::HttpBackend; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_network_api::{NetworkInfo, Peers}; @@ -27,16 +28,13 @@ use reth_rpc::{ cache::EthStateCache, 
gas_oracle::GasPriceOracle, EthFilterConfig, FeeHistoryCache, FeeHistoryCacheConfig, }, - AuthLayer, Claims, EngineEthApi, EthApi, EthFilter, EthSubscriptionIdProvider, - JwtAuthValidator, JwtSecret, + secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, EngineEthApi, EthApi, + EthFilter, EthSubscriptionIdProvider, JwtAuthValidator, JwtSecret, }; use reth_rpc_api::servers::*; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use reth_transaction_pool::TransactionPool; -use std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use tower::layer::util::Identity; /// Configure and launch a _standalone_ auth server with `engine` and a _new_ `eth` namespace. @@ -397,32 +395,27 @@ impl AuthServerHandle { format!("ws://{}", self.local_addr) } - fn bearer(&self) -> String { - format!( - "Bearer {}", - self.secret - .encode(&Claims { - iat: (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + - Duration::from_secs(60)) - .as_secs(), - exp: None, - }) - .unwrap() - ) - } - /// Returns a http client connected to the server. - pub fn http_client(&self) -> jsonrpsee::http_client::HttpClient { + pub fn http_client( + &self, + ) -> jsonrpsee::http_client::HttpClient> { + // Create a middleware that adds a new JWT token to every request. + let secret_layer = AuthClientLayer::new(self.secret.clone()); + let middleware = tower::ServiceBuilder::default().layer(secret_layer); jsonrpsee::http_client::HttpClientBuilder::default() - .set_headers(HeaderMap::from_iter([(AUTHORIZATION, self.bearer().parse().unwrap())])) + .set_http_middleware(middleware) .build(self.http_url()) .expect("Failed to create http client") } - /// Returns a ws client connected to the server. + /// Returns a ws client connected to the server. Note that the connection can only be + /// be established within 1 minute due to the JWT token expiration. 
pub async fn ws_client(&self) -> jsonrpsee::ws_client::WsClient { jsonrpsee::ws_client::WsClientBuilder::default() - .set_headers(HeaderMap::from_iter([(AUTHORIZATION, self.bearer().parse().unwrap())])) + .set_headers(HeaderMap::from_iter([( + AUTHORIZATION, + secret_to_bearer_header(&self.secret), + )])) .build(self.ws_url()) .await .expect("Failed to create ws client") diff --git a/crates/rpc/rpc/src/layers/auth_client_layer.rs b/crates/rpc/rpc/src/layers/auth_client_layer.rs new file mode 100644 index 000000000..4c845796e --- /dev/null +++ b/crates/rpc/rpc/src/layers/auth_client_layer.rs @@ -0,0 +1,79 @@ +use crate::{Claims, JwtSecret}; +use http::HeaderValue; +use hyper::{header::AUTHORIZATION, service::Service}; +use std::{ + task::{Context, Poll}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use tower::Layer; + +/// A layer that adds a new JWT token to every request using AuthClientService. +#[derive(Debug)] +pub struct AuthClientLayer { + secret: JwtSecret, +} + +impl AuthClientLayer { + /// Create a new AuthClientLayer with the given `secret`. + pub fn new(secret: JwtSecret) -> Self { + Self { secret } + } +} + +impl Layer for AuthClientLayer { + type Service = AuthClientService; + + fn layer(&self, inner: S) -> Self::Service { + AuthClientService::new(self.secret.clone(), inner) + } +} + +/// Automatically authenticates every client request with the given `secret`. 
+#[derive(Debug, Clone)] +pub struct AuthClientService { + secret: JwtSecret, + inner: S, +} + +impl AuthClientService { + fn new(secret: JwtSecret, inner: S) -> Self { + Self { secret, inner } + } +} + +impl Service> for AuthClientService +where + S: Service>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut request: hyper::Request) -> Self::Future { + request.headers_mut().insert(AUTHORIZATION, secret_to_bearer_header(&self.secret)); + self.inner.call(request) + } +} + +/// Helper function to convert a secret into a Bearer auth header value with claims according to +/// . +/// The token is valid for 60 seconds. +pub fn secret_to_bearer_header(secret: &JwtSecret) -> HeaderValue { + format!( + "Bearer {}", + secret + .encode(&Claims { + iat: (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + + Duration::from_secs(60)) + .as_secs(), + exp: None, + }) + .unwrap() + ) + .parse() + .unwrap() +} diff --git a/crates/rpc/rpc/src/layers/mod.rs b/crates/rpc/rpc/src/layers/mod.rs index ff021a372..83a336e5f 100644 --- a/crates/rpc/rpc/src/layers/mod.rs +++ b/crates/rpc/rpc/src/layers/mod.rs @@ -1,8 +1,11 @@ use http::{HeaderMap, Response}; +mod auth_client_layer; mod auth_layer; mod jwt_secret; mod jwt_validator; + +pub use auth_client_layer::{secret_to_bearer_header, AuthClientLayer, AuthClientService}; pub use auth_layer::AuthLayer; pub use jwt_secret::{Claims, JwtError, JwtSecret}; pub use jwt_validator::JwtAuthValidator; diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index c75fa9b6b..d68f8a018 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -41,7 +41,10 @@ pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider}; -pub use layers::{AuthLayer, 
AuthValidator, Claims, JwtAuthValidator, JwtError, JwtSecret}; +pub use layers::{ + secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, AuthValidator, Claims, + JwtAuthValidator, JwtError, JwtSecret, +}; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; From 2334317dc725d2153fcefb467b1de07e27740b10 Mon Sep 17 00:00:00 2001 From: 0xKitsune <77890308+0xKitsune@users.noreply.github.com> Date: Wed, 1 May 2024 14:00:57 -0700 Subject: [PATCH 157/250] chore: deduplicate fork timestamp configuration in `ChainSpec` (#8038) --- crates/node-core/src/init.rs | 5 +- crates/primitives/src/chain/mod.rs | 3 +- crates/primitives/src/chain/spec.rs | 167 +------------------------- crates/primitives/src/lib.rs | 4 +- examples/polygon-p2p/src/chain_cfg.rs | 4 +- 5 files changed, 9 insertions(+), 174 deletions(-) diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 883bb437a..92b9f5696 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -479,8 +479,8 @@ mod tests { DatabaseEnv, }; use reth_primitives::{ - Chain, ForkTimestamps, Genesis, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, - MAINNET_GENESIS_HASH, SEPOLIA, SEPOLIA_GENESIS_HASH, + Chain, Genesis, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, MAINNET_GENESIS_HASH, + SEPOLIA, SEPOLIA_GENESIS_HASH, }; use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; @@ -570,7 +570,6 @@ mod tests { ..Default::default() }, hardforks: BTreeMap::default(), - fork_timestamps: ForkTimestamps::default(), genesis_hash: None, paris_block_and_final_difficulty: None, deposit_contract: None, diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index bf60392cd..b04e88ee0 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -2,8 +2,7 @@ pub use alloy_chains::{Chain, ChainKind, NamedChain}; pub use info::ChainInfo; pub use spec::{ AllGenesisFormats, 
BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, - DisplayHardforks, ForkBaseFeeParams, ForkCondition, ForkTimestamps, DEV, GOERLI, HOLESKY, - MAINNET, SEPOLIA, + DisplayHardforks, ForkBaseFeeParams, ForkCondition, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; #[cfg(feature = "optimism")] pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index d0a5b8433..823548d27 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -41,7 +41,6 @@ pub static MAINNET: Lazy> = Lazy::new(|| { 15537394, U256::from(58_750_003_716_598_352_816_469u128), )), - fork_timestamps: ForkTimestamps::default().shanghai(1681338455).cancun(1710338135), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(1150000)), @@ -90,7 +89,6 @@ pub static GOERLI: Lazy> = Lazy::new(|| { )), // paris_block_and_final_difficulty: Some((7382818, U256::from(10_790_000))), - fork_timestamps: ForkTimestamps::default().shanghai(1678832736).cancun(1705473120), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -133,7 +131,6 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { )), // paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), - fork_timestamps: ForkTimestamps::default().shanghai(1677557088).cancun(1706655072), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -179,7 +176,6 @@ pub static HOLESKY: Lazy> = Lazy::new(|| { "b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4" )), paris_block_and_final_difficulty: Some((0, U256::from(1))), - fork_timestamps: ForkTimestamps::default().shanghai(1696000704).cancun(1707305664), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), 
(Hardfork::Homestead, ForkCondition::Block(0)), @@ -224,7 +220,6 @@ pub static DEV: Lazy> = Lazy::new(|| { "2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - fork_timestamps: ForkTimestamps::default().shanghai(0).cancun(0), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -270,11 +265,6 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1704992401) - .canyon(1704992401) - .cancun(1710374401) - .ecotone(1710374401), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -322,11 +312,6 @@ pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1708534800) - .ecotone(1708534800), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -376,11 +361,6 @@ pub static BASE_SEPOLIA: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1708534800) - .ecotone(1708534800), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -430,11 +410,6 @@ pub static BASE_MAINNET: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1704992401) - .canyon(1704992401) - 
.cancun(1710374401) - .ecotone(1710374401), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -535,12 +510,6 @@ pub struct ChainSpec { #[serde(skip, default)] pub paris_block_and_final_difficulty: Option<(u64, U256)>, - /// Timestamps of various hardforks - /// - /// This caches entries in `hardforks` map - #[serde(skip, default)] - pub fork_timestamps: ForkTimestamps, - /// The active hard forks and their activation conditions pub hardforks: BTreeMap, @@ -565,7 +534,6 @@ impl Default for ChainSpec { genesis_hash: Default::default(), genesis: Default::default(), paris_block_and_final_difficulty: Default::default(), - fork_timestamps: Default::default(), hardforks: Default::default(), deposit_contract: Default::default(), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), @@ -819,28 +787,19 @@ impl ChainSpec { /// Convenience method to check if [Hardfork::Shanghai] is active at a given timestamp. #[inline] pub fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork_timestamps - .shanghai - .map(|shanghai| timestamp >= shanghai) - .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp)) + self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp) } /// Convenience method to check if [Hardfork::Cancun] is active at a given timestamp. #[inline] pub fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork_timestamps - .cancun - .map(|cancun| timestamp >= cancun) - .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)) + self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp) } /// Convenience method to check if [Hardfork::Prague] is active at a given timestamp. 
#[inline] pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork_timestamps - .prague - .map(|prague| timestamp >= prague) - .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp)) + self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp) } /// Convenience method to check if [Hardfork::Byzantium] is active at a given block number. @@ -1084,7 +1043,6 @@ impl From for ChainSpec { chain: genesis.config.chain_id.into(), genesis, genesis_hash: None, - fork_timestamps: ForkTimestamps::from_hardforks(&hardforks), hardforks, paris_block_and_final_difficulty, deposit_contract: None, @@ -1093,94 +1051,6 @@ impl From for ChainSpec { } } -/// Various timestamps of forks -#[derive(Debug, Clone, Default, Eq, PartialEq)] -pub struct ForkTimestamps { - /// The timestamp of the Shanghai fork - pub shanghai: Option, - /// The timestamp of the Cancun fork - pub cancun: Option, - /// The timestamp of the Prague fork - pub prague: Option, - /// The timestamp of the Regolith fork - #[cfg(feature = "optimism")] - pub regolith: Option, - /// The timestamp of the Canyon fork - #[cfg(feature = "optimism")] - pub canyon: Option, - /// The timestamp of the Ecotone fork - #[cfg(feature = "optimism")] - pub ecotone: Option, -} - -impl ForkTimestamps { - /// Creates a new [`ForkTimestamps`] from the given hardforks by extracting the timestamps - fn from_hardforks(forks: &BTreeMap) -> Self { - let mut timestamps = ForkTimestamps::default(); - if let Some(shanghai) = forks.get(&Hardfork::Shanghai).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.shanghai(shanghai); - } - if let Some(cancun) = forks.get(&Hardfork::Cancun).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.cancun(cancun); - } - if let Some(prague) = forks.get(&Hardfork::Prague).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.prague(prague); - } - #[cfg(feature = "optimism")] - { - if let Some(regolith) = 
forks.get(&Hardfork::Regolith).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.regolith(regolith); - } - if let Some(canyon) = forks.get(&Hardfork::Canyon).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.canyon(canyon); - } - if let Some(ecotone) = forks.get(&Hardfork::Ecotone).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.ecotone(ecotone); - } - } - timestamps - } - - /// Sets the given Shanghai timestamp - pub fn shanghai(mut self, shanghai: u64) -> Self { - self.shanghai = Some(shanghai); - self - } - - /// Sets the given Cancun timestamp - pub fn cancun(mut self, cancun: u64) -> Self { - self.cancun = Some(cancun); - self - } - - /// Sets the given Prague timestamp - pub fn prague(mut self, prague: u64) -> Self { - self.prague = Some(prague); - self - } - - /// Sets the given regolith timestamp - #[cfg(feature = "optimism")] - pub fn regolith(mut self, regolith: u64) -> Self { - self.regolith = Some(regolith); - self - } - - /// Sets the given canyon timestamp - #[cfg(feature = "optimism")] - pub fn canyon(mut self, canyon: u64) -> Self { - self.canyon = Some(canyon); - self - } - - /// Sets the given ecotone timestamp - #[cfg(feature = "optimism")] - pub fn ecotone(mut self, ecotone: u64) -> Self { - self.ecotone = Some(ecotone); - self - } -} - /// A helper type for compatibility with geth's config #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(untagged)] @@ -1418,7 +1288,6 @@ impl ChainSpecBuilder { chain: self.chain.expect("The chain is required"), genesis: self.genesis.expect("The genesis is required"), genesis_hash: None, - fork_timestamps: ForkTimestamps::from_hardforks(&self.hardforks), hardforks: self.hardforks, paris_block_and_final_difficulty, deposit_contract: None, @@ -1839,36 +1708,6 @@ Post-merge hard forks (timestamp based): ); } - // Tests that the ForkTimestamps are correctly set up. 
- #[test] - fn test_fork_timestamps() { - let spec = ChainSpec::builder().chain(Chain::mainnet()).genesis(Genesis::default()).build(); - assert!(spec.fork_timestamps.shanghai.is_none()); - - let spec = ChainSpec::builder() - .chain(Chain::mainnet()) - .genesis(Genesis::default()) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(1337)) - .build(); - assert_eq!(spec.fork_timestamps.shanghai, Some(1337)); - assert!(spec.is_shanghai_active_at_timestamp(1337)); - assert!(!spec.is_shanghai_active_at_timestamp(1336)); - } - - // Tests that all predefined timestamps are correctly set up in the chainspecs - #[test] - fn test_predefined_chain_spec_fork_timestamps() { - let predefined = [&MAINNET, &SEPOLIA, &HOLESKY, &GOERLI]; - - for spec in predefined.iter() { - let expected_timestamp_forks = &spec.fork_timestamps; - let got_timestamp_forks = ForkTimestamps::from_hardforks(&spec.hardforks); - - // make sure they're the same - assert_eq!(expected_timestamp_forks, &got_timestamp_forks); - } - } - // Tests that we skip any fork blocks in block #0 (the genesis ruleset) #[test] fn ignores_genesis_fork_blocks() { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index d20a35164..ae20cf6b2 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -56,8 +56,8 @@ pub use block::{ }; pub use chain::{ AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainKind, ChainSpec, - ChainSpecBuilder, DisplayHardforks, ForkBaseFeeParams, ForkCondition, ForkTimestamps, - NamedChain, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, + ChainSpecBuilder, DisplayHardforks, ForkBaseFeeParams, ForkCondition, NamedChain, DEV, GOERLI, + HOLESKY, MAINNET, SEPOLIA, }; #[cfg(feature = "zstd-codec")] pub use compression::*; diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs index 5a1fadb53..5860cdb1d 100644 --- a/examples/polygon-p2p/src/chain_cfg.rs +++ b/examples/polygon-p2p/src/chain_cfg.rs @@ -1,6 
+1,5 @@ use reth_primitives::{ - b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, ForkTimestamps, Hardfork, Head, - NodeRecord, B256, + b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, Hardfork, Head, NodeRecord, B256, }; use std::{collections::BTreeMap, sync::Arc}; @@ -15,7 +14,6 @@ pub(crate) fn polygon_chain_spec() -> Arc { // genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"), genesis_hash: Some(GENESIS), - fork_timestamps: ForkTimestamps::default().shanghai(1681338455), paris_block_and_final_difficulty: None, hardforks: BTreeMap::from([ (Hardfork::Petersburg, ForkCondition::Block(0)), From 4f002f6ef19a94d7e0adafabf1c6a1280b202459 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 09:30:44 +0200 Subject: [PATCH 158/250] chore(engine): introduce blockchain tree action (#8029) Co-authored-by: Matthias Seitz --- crates/consensus/beacon/src/engine/mod.rs | 165 +++++++++++++--------- 1 file changed, 100 insertions(+), 65 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 338a37f02..839bb0278 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -43,8 +43,8 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{ - mpsc, - mpsc::{UnboundedReceiver, UnboundedSender}, + mpsc::{self, UnboundedReceiver, UnboundedSender}, + oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; @@ -189,13 +189,11 @@ where payload_builder: PayloadBuilderHandle, /// Validator for execution payloads payload_validator: ExecutionPayloadValidator, - /// Listeners for engine events. - listeners: EventListeners, + /// Current blockchain tree action. + blockchain_tree_action: Option>, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. invalid_headers: InvalidHeaderCache, - /// Consensus engine metrics. 
- metrics: EngineMetrics, /// After downloading a block corresponding to a recent forkchoice update, the engine will /// check whether or not we can connect the block to the current canonical chain. If we can't, /// we need to download and execute the missing parents of that block. @@ -209,6 +207,10 @@ where /// be used to download and execute the missing blocks. pipeline_run_threshold: u64, hooks: EngineHooksController, + /// Listeners for engine events. + listeners: EventListeners, + /// Consensus engine metrics. + metrics: EngineMetrics, } impl BeaconConsensusEngine @@ -305,11 +307,12 @@ where handle: handle.clone(), forkchoice_state_tracker: Default::default(), payload_builder, - listeners, invalid_headers: InvalidHeaderCache::new(MAX_INVALID_HEADERS), - metrics: EngineMetrics::default(), + blockchain_tree_action: None, pipeline_run_threshold, hooks: EngineHooksController::new(hooks), + listeners, + metrics: EngineMetrics::default(), }; let maybe_pipeline_target = match target { @@ -496,35 +499,38 @@ where &mut self, state: ForkchoiceState, attrs: Option, - ) -> Result { + tx: oneshot::Sender>, + ) { self.metrics.forkchoice_updated_messages.increment(1); self.blockchain.on_forkchoice_update_received(&state); trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - // Pre-validate forkchoice state update and return if it's invalid or - // cannot be processed at the moment. if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { - return Ok(on_updated) + // Pre-validate forkchoice state update and return if it's invalid + // or cannot be processed at the moment. 
+ self.on_forkchoice_updated_status(state, on_updated, tx); + } else { + self.blockchain_tree_action = + Some(BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx }); } - - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(state.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - - let status = self.on_forkchoice_updated_make_canonical_result( - state, - attrs, - make_canonical_result, - elapsed, - )?; - trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); - Ok(status) } /// Called after the forkchoice update status has been resolved. /// Depending on the outcome, the method updates the sync state and notifies the listeners /// about new processed FCU. - fn on_forkchoice_updated_status(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { + fn on_forkchoice_updated_status( + &mut self, + state: ForkchoiceState, + on_updated: OnForkChoiceUpdated, + tx: oneshot::Sender>, + ) { + // send the response to the CL ASAP + let status = on_updated.forkchoice_status(); + let _ = tx.send(Ok(on_updated)); + + // update the forkchoice state tracker + self.forkchoice_state_tracker.set_latest(state, status); + match status { ForkchoiceStatus::Invalid => {} ForkchoiceStatus::Valid => { @@ -1491,17 +1497,17 @@ where fn on_sync_event( &mut self, event: EngineSyncEvent, - ) -> Result { + ) -> Result { let outcome = match event { EngineSyncEvent::FetchedFullBlock(block) => { self.on_downloaded_block(block); - SyncEventOutcome::Processed + EngineEventOutcome::Processed } EngineSyncEvent::PipelineStarted(target) => { trace!(target: "consensus::engine", ?target, continuous = target.is_none(), "Started the pipeline"); self.metrics.pipeline_runs.increment(1); self.sync_state_updater.update_sync_state(SyncState::Syncing); - SyncEventOutcome::Processed + EngineEventOutcome::Processed } EngineSyncEvent::PipelineFinished { result, reached_max_block } => { 
trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); @@ -1509,10 +1515,10 @@ where let ctrl = result?; if reached_max_block { // Terminate the sync early if it's reached the maximum user-configured block. - SyncEventOutcome::ReachedMaxBlock + EngineEventOutcome::ReachedMaxBlock } else { self.on_pipeline_outcome(ctrl)?; - SyncEventOutcome::Processed + EngineEventOutcome::Processed } } EngineSyncEvent::PipelineTaskDropped => { @@ -1669,6 +1675,45 @@ where Ok(()) } + + /// Process the outcome of blockchain tree action. + fn on_blockchain_tree_action( + &mut self, + action: BlockchainTreeAction, + ) -> RethResult { + match action { + BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx } => { + let start = Instant::now(); + let result = self.blockchain.make_canonical(state.head_block_hash); + let elapsed = self.record_make_canonical_latency(start, &result); + match self + .on_forkchoice_updated_make_canonical_result(state, attrs, result, elapsed) + { + Ok(on_updated) => { + trace!(target: "consensus::engine", status = ?on_updated, ?state, "Returning forkchoice status"); + let fcu_status = on_updated.forkchoice_status(); + self.on_forkchoice_updated_status(state, on_updated, tx); + + if fcu_status.is_valid() { + let tip_number = self.blockchain.canonical_tip().number; + if self.sync.has_reached_max_block(tip_number) { + // Terminate the sync early if it's reached + // the maximum user configured block. + return Ok(EngineEventOutcome::ReachedMaxBlock) + } + } + } + Err(error) => { + let _ = tx.send(Err(RethError::Canonical(error.clone()))); + if error.is_fatal() { + return Err(RethError::Canonical(error)) + } + } + }; + } + }; + Ok(EngineEventOutcome::Processed) + } } /// On initialization, the consensus engine will poll the message receiver and return @@ -1711,6 +1756,15 @@ where continue } + // Process any blockchain tree action result as set forth during engine message + // processing. 
+ if let Some(action) = this.blockchain_tree_action.take() { + match this.on_blockchain_tree_action(action)? { + EngineEventOutcome::Processed => {} + EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), + }; + } + // Process one incoming message from the CL. We don't drain the messages right away, // because we want to sneak a polling of running hook in between them. // @@ -1719,34 +1773,7 @@ where if let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) { match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - match this.on_forkchoice_updated(state, payload_attrs) { - Ok(on_updated) => { - let fcu_status = on_updated.forkchoice_status(); - // update the forkchoice state tracker - this.forkchoice_state_tracker.set_latest(state, fcu_status); - // send the response to the CL ASAP - let _ = tx.send(Ok(on_updated)); - - if fcu_status.is_valid() { - let tip_number = this.blockchain.canonical_tip().number; - if this.sync.has_reached_max_block(tip_number) { - // Terminate the sync early if it's reached the - // maximum user configured block. - return Poll::Ready(Ok(())) - } - } - - this.on_forkchoice_updated_status(state, fcu_status); - } - Err(error) => { - if error.is_fatal() { - // fatal error, we can terminate the future - let _ = tx.send(Err(RethError::Canonical(error.clone()))); - return Poll::Ready(Err(RethError::Canonical(error).into())) - } - let _ = tx.send(Err(RethError::Canonical(error))); - } - }; + this.on_forkchoice_updated(state, payload_attrs, tx); } BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { this.metrics.new_payload_messages.increment(1); @@ -1770,9 +1797,9 @@ where if let Poll::Ready(sync_event) = this.sync.poll(cx) { match this.on_sync_event(sync_event)? 
{ // Sync event was successfully processed - SyncEventOutcome::Processed => (), + EngineEventOutcome::Processed => (), // Max block has been reached, exit the engine loop - SyncEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), + EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), } // this could have taken a while, so we start the next cycle to handle any new @@ -1806,12 +1833,20 @@ where } } -/// Represents outcomes of processing a sync event +enum BlockchainTreeAction { + FcuMakeCanonical { + state: ForkchoiceState, + attrs: Option, + tx: oneshot::Sender>, + }, +} + +/// Represents outcomes of processing an engine event #[derive(Debug)] -enum SyncEventOutcome { - /// Sync event was processed successfully, engine should continue. +enum EngineEventOutcome { + /// Engine event was processed successfully, engine should continue. Processed, - /// Sync event was processed successfully and reached max block. + /// Engine event was processed successfully and reached max block. 
ReachedMaxBlock, } From bb7f1135d030575ff2a315d091e7f10b682c0ae4 Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Thu, 2 May 2024 05:32:46 -0400 Subject: [PATCH 159/250] feat: write pruning config if --full is present (#7938) --- Cargo.lock | 1 + crates/config/Cargo.toml | 4 +- crates/config/src/config.rs | 31 ++++++++++-- crates/node/builder/Cargo.toml | 3 ++ crates/node/builder/src/launch/common.rs | 61 +++++++++++++++++++++++- 5 files changed, 95 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1ee55cb3..a6d7a7884 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7192,6 +7192,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "tempfile", "tokio", ] diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index ece3fa0bb..d9147d7b7 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -24,7 +24,9 @@ humantime-serde.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } -[dev-dependencies] +# toml confy.workspace = true + +[dev-dependencies] tempfile.workspace = true toml.workspace = true diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 7ce947b50..f6537a04c 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -6,10 +6,13 @@ use reth_primitives::PruneModes; use secp256k1::SecretKey; use serde::{Deserialize, Deserializer, Serialize}; use std::{ + ffi::OsStr, path::{Path, PathBuf}, time::Duration, }; +const EXTENSION: &str = "toml"; + /// Configuration for the reth node. #[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq, Serialize)] #[serde(default)] @@ -47,6 +50,22 @@ impl Config { .peer_config(peer_config) .discovery(discv4) } + + /// Save the configuration to toml file. 
+ pub fn save(&self, path: &Path) -> Result<(), std::io::Error> { + if path.extension() != Some(OsStr::new(EXTENSION)) { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!("reth config file extension must be '{EXTENSION}'"), + )); + } + confy::store_path(path, self).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) + } + + /// Sets the pruning configuration. + pub fn update_prune_confing(&mut self, prune_config: PruneConfig) { + self.prune = Some(prune_config); + } } /// Configuration for each stage in the pipeline. @@ -325,11 +344,9 @@ where #[cfg(test)] mod tests { - use super::Config; + use super::{Config, EXTENSION}; use std::time::Duration; - const EXTENSION: &str = "toml"; - fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { let temp_dir = tempfile::tempdir().unwrap(); let config_path = temp_dir.path().join(filename).with_extension(EXTENSION); @@ -347,6 +364,14 @@ mod tests { }) } + #[test] + fn test_store_config_method() { + with_tempdir("config-store-test-method", |config_path| { + let config = Config::default(); + config.save(config_path).expect("Failed to store config"); + }) + } + #[test] fn test_load_config() { with_tempdir("config-load-test", |config_path| { diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 270b0dfe5..ef671f127 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -54,3 +54,6 @@ eyre.workspace = true fdlimit.workspace = true confy.workspace = true rayon.workspace = true + +[dev-dependencies] +tempfile.workspace = true diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 63060f647..043b587b8 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -22,7 +22,7 @@ use reth_prune::PrunerBuilder; use reth_rpc::JwtSecret; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{error, 
info}; +use reth_tracing::tracing::{error, info, warn}; /// Reusable setup for launching a node. /// @@ -66,6 +66,8 @@ impl LaunchContext { let mut toml_config = confy::load_path::(&config_path) .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; + Self::save_pruning_config_if_full_node(&mut toml_config, config, &config_path)?; + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); // Update the config with the command line arguments @@ -81,6 +83,24 @@ impl LaunchContext { Ok(toml_config) } + /// Save prune config to the toml file if node is a full node. + fn save_pruning_config_if_full_node( + reth_config: &mut reth_config::Config, + config: &NodeConfig, + config_path: impl AsRef, + ) -> eyre::Result<()> { + if reth_config.prune.is_none() { + if let Some(prune_config) = config.prune_config() { + reth_config.update_prune_confing(prune_config); + info!(target: "reth::cli", "Saving prune config to toml file"); + reth_config.save(config_path.as_ref())?; + } + } else if config.prune_config().is_none() { + warn!(target: "reth::cli", "Prune configs present in config file but --full not provided. Running as a Full node"); + } + Ok(()) + } + /// Convenience function to [Self::configure_globals] pub fn with_configured_globals(self) -> Self { self.configure_globals(); @@ -456,3 +476,42 @@ pub struct WithConfigs { /// The loaded reth.toml config. 
pub toml_config: reth_config::Config, } + +#[cfg(test)] +mod tests { + use super::{LaunchContext, NodeConfig}; + use reth_config::Config; + use reth_node_core::args::PruningArgs; + + const EXTENSION: &str = "toml"; + + fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { + let temp_dir = tempfile::tempdir().unwrap(); + let config_path = temp_dir.path().join(filename).with_extension(EXTENSION); + proc(&config_path); + temp_dir.close().unwrap() + } + + #[test] + fn test_save_prune_config() { + with_tempdir("prune-store-test", |config_path| { + let mut reth_config = Config::default(); + let node_config = + NodeConfig { pruning: PruningArgs { full: true }, ..NodeConfig::test() }; + LaunchContext::save_pruning_config_if_full_node( + &mut reth_config, + &node_config, + config_path, + ) + .unwrap(); + + assert_eq!( + reth_config.prune.as_ref().map(|p| p.block_interval), + node_config.prune_config().map(|p| p.block_interval) + ); + + let loaded_config: Config = confy::load_path(config_path).unwrap(); + assert_eq!(reth_config, loaded_config); + }) + } +} From 978be33a9954537b48e5cff1ee53b24a47392341 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 2 May 2024 12:49:54 +0200 Subject: [PATCH 160/250] chore(deps): rm builder dep (#8043) --- Cargo.lock | 2 +- crates/blockchain-tree/Cargo.toml | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6d7a7884..2840074de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6453,9 +6453,9 @@ dependencies = [ "parking_lot 0.12.2", "reth-consensus", "reth-db", + "reth-evm-ethereum", "reth-interfaces", "reth-metrics", - "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 1757b2939..ecb2e4ef3 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -41,7 +41,7 @@ reth-interfaces = { workspace = 
true, features = ["test-utils"] } reth-primitives = { workspace = true , features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-revm.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true parking_lot.workspace = true assert_matches.workspace = true diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 09f829c7e..fc9e7685a 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1273,7 +1273,7 @@ mod tests { use linked_hash_set::LinkedHashSet; use reth_consensus::test_utils::TestConsensus; use reth_db::{tables, test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv}; - use reth_node_ethereum::EthEvmConfig; + use reth_evm_ethereum::EthEvmConfig; #[cfg(not(feature = "optimism"))] use reth_primitives::proofs::calculate_receipt_root; #[cfg(feature = "optimism")] From 10ef202d7cea0a46189520413819955abcdb66de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?raster=20=E2=96=A6?= <102927511+raster21@users.noreply.github.com> Date: Thu, 2 May 2024 14:56:21 +0400 Subject: [PATCH 161/250] chore: update 1.0 release ETA to May (#8040) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3f5e434ee..47d833712 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ We actively recommend professional node operators to switch to Reth in productio While we are aware of parties running Reth staking nodes in production, we do *not* encourage usage in production staking environments by non-professionals until our audits are done, and the 1.0 version of Reth is released, but we are available to support without warranty or liability. More historical context below: -* We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~April 2024. 
+* We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~May 2024. * Reth is currently undergoing an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. * Revm (the EVM used in Reth) is undergoing an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). * We are releasing [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4th 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives. From 7428573d7c5e9a215ee6a20beeb33f1e8ba4e98e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 2 May 2024 13:16:22 +0200 Subject: [PATCH 162/250] feat(discv5): bootstrap cli (#8004) --- Cargo.lock | 1 + bin/reth/src/commands/p2p/mod.rs | 12 +++++- crates/net/discv5/src/config.rs | 55 +++++++++++++++++++++++++++- crates/net/discv5/src/lib.rs | 40 +++++++++++--------- crates/node-core/Cargo.toml | 1 + crates/node-core/src/args/network.rs | 24 ++++++++++++ crates/node-core/src/node_config.rs | 12 +++++- 7 files changed, 125 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2840074de..3110f8ff0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7224,6 +7224,7 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-discv4", + "reth-discv5", "reth-engine-primitives", "reth-evm", "reth-interfaces", diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 35d111e57..1cc5d4f88 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -138,7 +138,14 @@ impl Command { if self.discovery.enable_discv5_discovery { network_config = network_config.discovery_v5_with_config_builder(|builder| { - let DiscoveryArgs { discv5_addr, discv5_port, .. 
} = self.discovery; + let DiscoveryArgs { + discv5_addr, + discv5_port, + discv5_lookup_interval, + discv5_bootstrap_lookup_interval, + discv5_bootstrap_lookup_countdown, + .. + } = self.discovery; builder .discv5_config( discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( @@ -147,6 +154,9 @@ impl Command { )))) .build(), ) + .lookup_interval(discv5_lookup_interval) + .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) + .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) .build() }); } diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 371d40953..3a506902e 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -16,7 +16,18 @@ use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkS /// Default interval in seconds at which to run a lookup up query. /// /// Default is 60 seconds. -const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; +pub const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; + +/// Default number of times to do pulse lookup queries, at bootstrap (pulse intervals, defaulting +/// to 5 seconds). +/// +/// Default is 100 counts. +pub const DEFAULT_COUNT_BOOTSTRAP_LOOKUPS: u64 = 100; + +/// Default duration of look up interval, for pulse look ups at bootstrap. +/// +/// Default is 5 seconds. +pub const DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL: u64 = 5; /// Builds a [`Config`]. #[derive(Debug, Default)] @@ -39,6 +50,11 @@ pub struct ConfigBuilder { other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup up query to populate kbuckets. lookup_interval: Option, + /// Interval in seconds at which to run pulse lookup queries at bootstrap to boost kbucket + /// population. + bootstrap_lookup_interval: Option, + /// Number of times to run boost lookup queries at start up. 
+ bootstrap_lookup_countdown: Option, /// Custom filter rules to apply to a discovered peer in order to determine if it should be /// passed up to rlpx or dropped. discovered_peer_filter: Option, @@ -54,6 +70,8 @@ impl ConfigBuilder { tcp_port, other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } = discv5_config; @@ -64,6 +82,8 @@ impl ConfigBuilder { tcp_port, other_enr_kv_pairs, lookup_interval: Some(lookup_interval), + bootstrap_lookup_interval: Some(bootstrap_lookup_interval), + bootstrap_lookup_countdown: Some(bootstrap_lookup_countdown), discovered_peer_filter: Some(discovered_peer_filter), } } @@ -135,6 +155,26 @@ impl ConfigBuilder { self } + /// Sets the interval at which to run lookup queries, in order to fill kbuckets. Lookup queries + /// are done periodically at the given interval for the whole run of the program. + pub fn lookup_interval(mut self, seconds: u64) -> Self { + self.lookup_interval = Some(seconds); + self + } + + /// Sets the interval at which to run boost lookup queries at start up. Queries will be started + /// at this interval for the configured number of times after start up. + pub fn bootstrap_lookup_interval(mut self, seconds: u64) -> Self { + self.bootstrap_lookup_interval = Some(seconds); + self + } + + /// Sets the the number of times at which to run boost lookup queries to bootstrap the node. + pub fn bootstrap_lookup_countdown(mut self, counts: u64) -> Self { + self.bootstrap_lookup_countdown = Some(counts); + self + } + /// Adds keys to disallow when filtering a discovered peer, to determine whether or not it /// should be passed to rlpx. The discovered node record is scanned for any kv-pairs where the /// key matches the disallowed keys. If not explicitly set, b"eth2" key will be disallowed. 
@@ -154,6 +194,8 @@ impl ConfigBuilder { tcp_port, other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } = self; @@ -163,6 +205,10 @@ impl ConfigBuilder { let fork = fork.map(|(key, fork_id)| (key, fork_id.into())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); + let bootstrap_lookup_interval = + bootstrap_lookup_interval.unwrap_or(DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL); + let bootstrap_lookup_countdown = + bootstrap_lookup_countdown.unwrap_or(DEFAULT_COUNT_BOOTSTRAP_LOOKUPS); let discovered_peer_filter = discovered_peer_filter .unwrap_or_else(|| MustNotIncludeKeys::new(&[NetworkStackId::ETH2])); @@ -174,6 +220,8 @@ impl ConfigBuilder { tcp_port, other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } } @@ -197,6 +245,11 @@ pub struct Config { pub(super) other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup up query with to populate kbuckets. pub(super) lookup_interval: u64, + /// Interval in seconds at which to run pulse lookup queries at bootstrap to boost kbucket + /// population. + pub(super) bootstrap_lookup_interval: u64, + /// Number of times to run boost lookup queries at start up. + pub(super) bootstrap_lookup_countdown: u64, /// Custom filter rules to apply to a discovered peer in order to determine if it should be /// passed up to rlpx or dropped. 
pub(super) discovered_peer_filter: MustNotIncludeKeys, diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 14793fab0..b8b2eab24 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -37,7 +37,10 @@ pub mod network_stack_id; pub use discv5::{self, IpMode}; -pub use config::{BootNode, Config, ConfigBuilder}; +pub use config::{ + BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, + DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, +}; pub use enr::enr_to_discv4_id; pub use error::Error; pub use filter::{FilterOutcome, MustNotIncludeKeys}; @@ -45,17 +48,6 @@ pub use network_stack_id::NetworkStackId; use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; -/// Default number of times to do pulse lookup queries, at bootstrap (pulse intervals, defaulting -/// to 5 seconds). -/// -/// Default is 100 counts. -pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; - -/// Default duration of look up interval, for pulse look ups at bootstrap. -/// -/// Default is 5 seconds. -pub const DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL: u64 = 5; - /// Max kbucket index is 255. /// /// This is the max log2distance for 32 byte [`NodeId`](discv5::enr::NodeId) - 1. See . @@ -180,7 +172,13 @@ impl Discv5 { // 2. start discv5 // let Config { - discv5_config, bootstrap_nodes, lookup_interval, discovered_peer_filter, .. + discv5_config, + bootstrap_nodes, + lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, + discovered_peer_filter, + .. } = discv5_config; let EnrCombinedKeyWrapper(enr) = enr.into(); @@ -206,7 +204,13 @@ impl Discv5 { // // 4. 
start bg kbuckets maintenance // - Self::spawn_populate_kbuckets_bg(lookup_interval, metrics.clone(), discv5.clone()); + Self::spawn_populate_kbuckets_bg( + lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, + metrics.clone(), + discv5.clone(), + ); Ok(( Self { discv5, ip_mode, fork_key, discovered_peer_filter, metrics }, @@ -319,6 +323,8 @@ impl Discv5 { /// Backgrounds regular look up queries, in order to keep kbuckets populated. fn spawn_populate_kbuckets_bg( lookup_interval: u64, + bootstrap_lookup_interval: u64, + bootstrap_lookup_countdown: u64, metrics: Discv5Metrics, discv5: Arc, ) { @@ -327,18 +333,18 @@ impl Discv5 { let lookup_interval = Duration::from_secs(lookup_interval); let metrics = metrics.discovered_peers; let mut kbucket_index = MAX_KBUCKET_INDEX; - let pulse_lookup_interval = Duration::from_secs(DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL); + let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); // todo: graceful shutdown async move { // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest // log2distance from local node - for i in (0..DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP).rev() { + for i in (0..bootstrap_lookup_countdown).rev() { let target = discv5::enr::NodeId::random(); trace!(target: "net::discv5", %target, - bootstrap_boost_runs_count_down=i, + bootstrap_boost_runs_countdown=i, lookup_interval=format!("{:#?}", pulse_lookup_interval), "starting bootstrap boost lookup query" ); diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 4bce2908d..157b44970 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -27,6 +27,7 @@ reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true reth-discv4.workspace = true +reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-api.workspace = true reth-evm.workspace = true diff --git a/crates/node-core/src/args/network.rs 
b/crates/node-core/src/args/network.rs index 59dc6ceba..df6f8ece8 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -7,6 +7,10 @@ use reth_discv4::{ DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT, DEFAULT_DISCOVERY_V5_ADDR, DEFAULT_DISCOVERY_V5_PORT, }; +use reth_discv5::{ + DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, + DEFAULT_SECONDS_LOOKUP_INTERVAL, +}; use reth_net_nat::NatResolver; use reth_network::{ transactions::{ @@ -235,6 +239,23 @@ pub struct DiscoveryArgs { #[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT", default_value_t = DEFAULT_DISCOVERY_V5_PORT)] pub discv5_port: u16, + + /// The interval in seconds at which to carry out periodic lookup queries, for the whole + /// run of the program. + #[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", + default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)] + pub discv5_lookup_interval: u64, + + /// The interval in seconds at which to carry out boost lookup queries, for a fixed number of + /// times, at bootstrap. + #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval", + default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)] + pub discv5_bootstrap_lookup_interval: u64, + + /// The number of times to carry out boost lookup queries at bootstrap. 
+ #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown", + default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)] + pub discv5_bootstrap_lookup_countdown: u64, } impl DiscoveryArgs { @@ -278,6 +299,9 @@ impl Default for DiscoveryArgs { port: DEFAULT_DISCOVERY_PORT, discv5_addr: DEFAULT_DISCOVERY_V5_ADDR, discv5_port: DEFAULT_DISCOVERY_V5_PORT, + discv5_lookup_interval: DEFAULT_SECONDS_LOOKUP_INTERVAL, + discv5_bootstrap_lookup_interval: DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, + discv5_bootstrap_lookup_countdown: DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, } } } diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 411a8b447..3f149a824 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -476,7 +476,14 @@ impl NodeConfig { // work around since discv5 config builder can't be integrated into network config builder // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { - let DiscoveryArgs { discv5_addr, discv5_port, .. } = self.network.discovery; + let DiscoveryArgs { + discv5_addr, + discv5_port, + discv5_lookup_interval, + discv5_bootstrap_lookup_interval, + discv5_bootstrap_lookup_countdown, + .. 
+ } = self.network.discovery; builder .discv5_config( discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( @@ -485,6 +492,9 @@ impl NodeConfig { )))) .build(), ) + .lookup_interval(discv5_lookup_interval) + .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) + .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) .build() }) } From aba48a5505e027295c9ebf996df7cbb2a3bd81d7 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 2 May 2024 13:02:51 +0100 Subject: [PATCH 163/250] perf: add `ETL` to `init_from_state_dump` (#8022) --- Cargo.lock | 127 +++++++-------- bin/reth/src/commands/init_state.rs | 10 +- crates/node-core/Cargo.toml | 2 + crates/node-core/src/init.rs | 148 ++++++++++++------ crates/storage/codecs/Cargo.toml | 3 +- .../codecs/src/alloy/genesis_account.rs | 67 ++++++++ crates/storage/codecs/src/alloy/mod.rs | 1 + .../storage/db/src/tables/codecs/compact.rs | 4 +- .../src/bundle_state/state_reverts.rs | 24 +-- 9 files changed, 251 insertions(+), 135 deletions(-) create mode 100644 crates/storage/codecs/src/alloy/genesis_account.rs diff --git a/Cargo.lock b/Cargo.lock index 3110f8ff0..0a831ae80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -514,7 +514,7 @@ version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-json-rpc", - "base64 0.22.0", + "base64 0.22.1", "futures-util", "futures-utils-wasm", "serde", @@ -550,7 +550,7 @@ dependencies = [ "arbitrary", "derive_arbitrary", "derive_more", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "nybbles", "proptest", "proptest-derive", @@ -1006,9 +1006,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = 
"72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -1220,7 +1220,7 @@ dependencies = [ "cfg-if", "dashmap", "fast-float", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "icu_normalizer", "indexmap 2.2.6", "intrusive-collections", @@ -1255,7 +1255,7 @@ checksum = "c055ef3cd87ea7db014779195bc90c6adfc35de4902e3b2fe587adecbd384578" dependencies = [ "boa_macros", "boa_profiler", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "thin-vec", ] @@ -1267,7 +1267,7 @@ checksum = "0cacc9caf022d92195c827a3e5bf83f96089d4bfaff834b359ac7b6be46e9187" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "indexmap 2.2.6", "once_cell", "phf", @@ -1479,9 +1479,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" dependencies = [ "jobserver", "libc", @@ -2248,7 +2248,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core 0.9.10", @@ -2256,15 +2256,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" 
dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2272,9 +2272,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", "syn 1.0.109", @@ -3015,9 +3015,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "miniz_oxide", @@ -3364,9 +3364,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -3379,7 +3379,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -3388,7 +3388,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -3626,7 +3626,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3712,7 +3712,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite", - "socket2 
0.5.6", + "socket2 0.5.7", "tokio", "tower", "tower-service", @@ -4032,7 +4032,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -4127,7 +4127,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg 0.50.0", @@ -4204,9 +4204,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b0e68d9af1f066c06d6e2397583795b912d78537d7d907c561e82c13d69fa1" +checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4222,9 +4222,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92f254f56af1ae84815b9b1325094743dcf05b92abb5e94da2e81a35cff0cada" +checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" dependencies = [ "futures-channel", "futures-util", @@ -4246,9 +4246,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "274d68152c24aa78977243bb56f28d7946e6aa309945b37d33174a3f92d89a3a" +checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" dependencies = [ "anyhow", "async-trait", @@ -4272,9 +4272,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" +checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" dependencies = [ "async-trait", "hyper 0.14.28", @@ -4292,9 +4292,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c326f9e95aeff7d707b2ffde72c22a52acc975ba1c48587776c02b90c4747a6" +checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.1.0", @@ -4305,9 +4305,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b5bfbda5f8fb63f997102fd18f73e35e34c84c6dcdbdbbe72c6e48f6d2c959b" +checksum = "12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41" dependencies = [ "futures-util", "http 0.2.12", @@ -4329,9 +4329,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc828e537868d6b12bbb07ec20324909a22ced6efca0057c825c3e1126b2c6d" +checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" dependencies = [ "anyhow", "beef", @@ -4342,9 +4342,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cf8dcee48f383e24957e238240f997ec317ba358b4e6d2e8be3f745bcdabdb5" +checksum = "f448d8eacd945cc17b6c0b42c361531ca36a962ee186342a97cdb8fca679cd77" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4353,9 +4353,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"32f00abe918bf34b785f87459b9205790e5361a3f7437adb50e928dc243f27eb" +checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070" dependencies = [ "http 0.2.12", "jsonrpsee-client-transport", @@ -4443,9 +4443,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] name = "libffi" @@ -4727,7 +4727,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -5476,7 +5476,7 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "serde", ] @@ -6178,7 +6178,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", "memchr", ] @@ -6226,7 +6226,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "bytes", "futures-core", "futures-util", @@ -6481,6 +6481,7 @@ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ "alloy-eips", + "alloy-genesis", "alloy-primitives", "arbitrary", "bytes", @@ -7220,12 +7221,14 @@ dependencies = [ "proptest", "rand 0.8.5", "reth-beacon-consensus", + "reth-codecs", "reth-config", "reth-consensus-common", "reth-db", 
"reth-discv4", "reth-discv5", "reth-engine-primitives", + "reth-etl", "reth-evm", "reth-interfaces", "reth-metrics", @@ -8003,7 +8006,7 @@ dependencies = [ "derive_more", "dyn-clone", "enumn", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "hex", "once_cell", "serde", @@ -8088,9 +8091,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1c77081a55300e016cb86f2864415b7518741879db925b8d488a0ee0d2da6bf" +checksum = "b26f4c25a604fcb3a1bcd96dd6ba37c93840de95de8198d94c0d571a74a804d1" dependencies = [ "bytemuck", "byteorder", @@ -8285,7 +8288,7 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "rustls-pki-types", ] @@ -8582,11 +8585,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c85f8e96d1d6857f13768fcbd895fcb06225510022a2774ed8b5150581847b0" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -8600,9 +8603,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8b3a576c4eb2924262d5951a3b737ccaf16c931e39a2810c36f9a7e25575557" +checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ "darling 0.20.8", "proc-macro2", @@ -8848,9 +8851,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = 
"ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -9359,7 +9362,7 @@ dependencies = [ "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs index fa70264e5..e0558be32 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/bin/reth/src/commands/init_state.rs @@ -8,6 +8,7 @@ use crate::{ dirs::{DataDirPath, MaybePlatformPath}, }; use clap::Parser; +use reth_config::config::EtlConfig; use reth_db::{database::Database, init_db}; use reth_node_core::init::{init_from_state_dump, init_genesis}; use reth_primitives::{ChainSpec, B256}; @@ -78,11 +79,15 @@ impl InitStateCommand { info!(target: "reth::cli", "Database opened"); let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?; + let etl_config = EtlConfig::new( + Some(EtlConfig::from_datadir(data_dir.data_dir())), + EtlConfig::default_file_size(), + ); info!(target: "reth::cli", "Writing genesis block"); let hash = match self.state { - Some(path) => init_at_state(path, provider_factory)?, + Some(path) => init_at_state(path, provider_factory, etl_config)?, None => init_genesis(provider_factory)?, }; @@ -95,6 +100,7 @@ impl InitStateCommand { pub fn init_at_state( state_dump_path: PathBuf, factory: ProviderFactory, + etl_config: EtlConfig, ) -> eyre::Result { info!(target: "reth::cli", path=?state_dump_path, @@ -103,5 +109,5 @@ pub fn init_at_state( let file = File::open(state_dump_path)?; let reader = BufReader::new(file); - init_from_state_dump(reader, factory) + init_from_state_dump(reader, factory, etl_config) } diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 157b44970..3caf5d9d1 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -36,6 +36,8 @@ reth-tasks.workspace = true reth-trie.workspace = 
true reth-consensus-common.workspace = true reth-beacon-consensus.workspace = true +reth-etl.workspace = true +reth-codecs.workspace = true # ethereum discv5.workspace = true diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 92b9f5696..8a7751e4e 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -1,10 +1,13 @@ //! Reth genesis initialization utility functions. +use reth_codecs::Compact; +use reth_config::config::EtlConfig; use reth_db::{ database::Database, tables, transaction::{DbTx, DbTxMut}, }; +use reth_etl::Collector; use reth_interfaces::{db::DatabaseError, provider::ProviderResult}; use reth_primitives::{ stage::StageId, Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, @@ -293,10 +296,16 @@ pub fn insert_genesis_header( Ok(()) } -/// Initialize chain with state at specific block, from reader of state dump. +/// Reads account state from a [`BufRead`] reader and initializes it at the highest block that can +/// be found on database. +/// +/// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can +/// be set to the highest block present. One practical usecase is to import OP mainnet state at +/// bedrock transition block. 
pub fn init_from_state_dump( mut reader: impl BufRead, factory: ProviderFactory, + etl_config: EtlConfig, ) -> eyre::Result { let block = factory.last_block_number()?; let hash = factory.block_hash(block)?.unwrap(); @@ -307,47 +316,115 @@ pub fn init_from_state_dump( "Initializing state at block" ); - let mut total_inserted_accounts = 0; - let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP); - let mut chunk_total_byte_len = 0; - let mut line = String::new(); - // first line can be state root, then it can be used for verifying against computed state root + let expected_state_root = parse_state_root(&mut reader)?; + + // remaining lines are accounts + let collector = parse_accounts(&mut reader, etl_config)?; + + // write state to db + let mut provider_rw = factory.provider_rw()?; + dump_state(collector, &mut provider_rw, block)?; + + // compute and compare state root. this advances the stage checkpoints. + let computed_state_root = compute_state_root(&provider_rw)?; + if computed_state_root != expected_state_root { + error!(target: "reth::cli", + ?computed_state_root, + ?expected_state_root, + "Computed state root does not match state root in state dump" + ); + + Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? + } else { + info!(target: "reth::cli", + ?computed_state_root, + "Computed state root matches state root in state dump" + ); + } + + provider_rw.commit()?; + + Ok(hash) +} + +/// Parses and returns expected state root. +fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result { + let mut line = String::new(); reader.read_line(&mut line)?; - let expected_state_root = serde_json::from_str::(&line)?.root; + let expected_state_root = serde_json::from_str::(&line)?.root; trace!(target: "reth::cli", root=%expected_state_root, "Read state root from file" ); + Ok(expected_state_root) +} - line.clear(); +/// Parses accounts and pushes them to a [`Collector`]. 
+fn parse_accounts( + mut reader: impl BufRead, + etl_config: EtlConfig, +) -> Result, eyre::Error> { + let mut line = String::new(); + let mut collector = Collector::new(etl_config.file_size, etl_config.dir); - // remaining lines are accounts - let mut provider_rw = factory.provider_rw()?; while let Ok(n) = reader.read_line(&mut line) { - chunk_total_byte_len += n; - if DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK <= chunk_total_byte_len || n == 0 { - // acc + if n == 0 { + break; + } + + let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; + collector.insert(address, genesis_account)?; + + if !collector.is_empty() && collector.len() % AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP == 0 + { + info!(target: "reth::cli", + parsed_new_accounts=collector.len(), + ); + } + + line.clear(); + } + + Ok(collector) +} + +/// Takes a [`Collector`] and processes all accounts. +fn dump_state( + mut collector: Collector, + provider_rw: &mut DatabaseProviderRW, + block: u64, +) -> Result<(), eyre::Error> { + let accounts_len = collector.len(); + let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP); + let mut total_inserted_accounts = 0; + + for (index, entry) in collector.iter()?.enumerate() { + let (address, account) = entry?; + let (address, _) = Address::from_compact(address.as_slice(), address.len()); + let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len()); + + accounts.push((address, account)); + + if (index > 0 && index % AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP == 0) || + index == accounts_len - 1 + { total_inserted_accounts += accounts.len(); info!(target: "reth::cli", - chunk_total_byte_len, - parsed_new_accounts=accounts.len(), total_inserted_accounts, "Writing accounts to db" ); - // reset - chunk_total_byte_len = 0; - // use transaction to insert genesis header insert_genesis_hashes( - &provider_rw, + provider_rw, accounts.iter().map(|(address, account)| (address, account)), )?; + 
insert_history( - &provider_rw, + provider_rw, accounts.iter().map(|(address, account)| (address, account)), block, )?; @@ -363,37 +440,8 @@ pub fn init_from_state_dump( accounts.clear(); } - - if n == 0 { - break; - } - - let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; - accounts.push((address, genesis_account)); - - line.clear(); } - - // compute and compare state root. this advances the stage checkpoints. - let computed_state_root = compute_state_root(&provider_rw)?; - if computed_state_root != expected_state_root { - error!(target: "reth::cli", - ?computed_state_root, - ?expected_state_root, - "Computed state root does not match state root in state dump" - ); - - Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? - } else { - info!(target: "reth::cli", - ?computed_state_root, - "Computed state root matches state root in state dump" - ); - } - - provider_rw.commit()?; - - Ok(hash) + Ok(()) } /// Computes the state root (from scratch) based on the accounts and storages present in the diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index ab8f1a323..958ccf917 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -16,6 +16,7 @@ reth-codecs-derive = { path = "./derive", default-features = false } # eth alloy-eips = { workspace = true, optional = true } +alloy-genesis = { workspace = true, optional = true } alloy-primitives.workspace = true # misc @@ -36,5 +37,5 @@ proptest-derive.workspace = true [features] default = ["std", "alloy"] std = ["alloy-primitives/std", "bytes/std"] -alloy = ["dep:alloy-eips", "dep:modular-bitfield"] +alloy = ["dep:alloy-eips", "dep:alloy-genesis", "dep:modular-bitfield"] optimism = ["reth-codecs-derive/optimism"] diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs new file mode 100644 index 000000000..619d9db51 --- /dev/null +++ 
b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -0,0 +1,67 @@ +use crate::Compact; +use alloy_genesis::GenesisAccount as AlloyGenesisAccount; +use alloy_primitives::{Bytes, B256, U256}; +use reth_codecs_derive::main_codec; + +/// GenesisAccount acts as bridge which simplifies Compact implementation for AlloyGenesisAccount. +/// +/// Notice: Make sure this struct is 1:1 with `alloy_genesis::GenesisAccount` +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct GenesisAccount { + /// The nonce of the account at genesis. + nonce: Option, + /// The balance of the account at genesis. + balance: U256, + /// The account's bytecode at genesis. + code: Option, + /// The account's storage at genesis. + storage: Option, + /// The account's private key. Should only be used for testing. + private_key: Option, +} + +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct StorageEntries { + entries: Vec, +} + +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct StorageEntry { + key: B256, + value: B256, +} + +impl Compact for AlloyGenesisAccount { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let account = GenesisAccount { + nonce: self.nonce, + balance: self.balance, + code: self.code, + storage: self.storage.map(|s| StorageEntries { + entries: s.into_iter().map(|(key, value)| StorageEntry { key, value }).collect(), + }), + private_key: self.private_key, + }; + account.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (account, _) = GenesisAccount::from_compact(buf, len); + let alloy_account = AlloyGenesisAccount { + nonce: account.nonce, + balance: account.balance, + code: account.code, + storage: account + .storage + .map(|s| s.entries.into_iter().map(|entry| (entry.key, entry.value)).collect()), + private_key: account.private_key, + }; + (alloy_account, buf) + } +} diff --git a/crates/storage/codecs/src/alloy/mod.rs 
b/crates/storage/codecs/src/alloy/mod.rs index aff164642..664ab2607 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,4 +1,5 @@ mod access_list; +mod genesis_account; mod log; mod txkind; mod withdrawal; diff --git a/crates/storage/db/src/tables/codecs/compact.rs b/crates/storage/db/src/tables/codecs/compact.rs index 452f5c632..aed8d97ef 100644 --- a/crates/storage/db/src/tables/codecs/compact.rs +++ b/crates/storage/db/src/tables/codecs/compact.rs @@ -50,7 +50,9 @@ impl_compression_for_compact!( CompactU256, StageCheckpoint, PruneCheckpoint, - ClientVersion + ClientVersion, + // Non-DB + GenesisAccount ); macro_rules! impl_compression_fixed_compact { diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 006f87b40..cc16a50cc 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -1,6 +1,6 @@ use rayon::slice::ParallelSliceMut; use reth_db::{ - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, + cursor::{DbCursorRO, DbDupCursorRO, DbDupCursorRW}, models::{AccountBeforeTx, BlockNumberAddress}, tables, transaction::{DbTx, DbTxMut}, @@ -75,30 +75,16 @@ impl StateReverts { tracing::trace!(target: "provider::reverts", "Writing account changes"); let mut account_changeset_cursor = tx.cursor_dup_write::()?; - // append entries if key is new - let should_append_accounts = - account_changeset_cursor.last()?.map_or(true, |(block_number, _)| { - block_number < first_block || block_number == first_block && block_number == 0 - }); for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; // Sort accounts by address. 
account_block_reverts.par_sort_by_key(|a| a.0); for (address, info) in account_block_reverts { - if should_append_accounts { - account_changeset_cursor.append_dup( - block_number, - AccountBeforeTx { address, info: info.map(into_reth_acc) }, - )?; - } else { - // upsert on dupsort tables will append to subkey. see implementation of - // DbCursorRW::upsert for reth_db::implementation::mdbx::cursor::Cursor - account_changeset_cursor.upsert( - block_number, - AccountBeforeTx { address, info: info.map(into_reth_acc) }, - )?; - } + account_changeset_cursor.append_dup( + block_number, + AccountBeforeTx { address, info: info.map(into_reth_acc) }, + )?; } } From f6649c31b2947a0be9807ff3e21a4827553df3fb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 2 May 2024 14:43:37 +0100 Subject: [PATCH 164/250] docs(libmdbx): `static_files` -> `snapshots` (#8046) --- crates/storage/libmdbx-rs/src/environment.rs | 2 +- crates/storage/libmdbx-rs/src/flags.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 218196c49..03afb4784 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -976,7 +976,7 @@ mod tests { } // Insert more data in the database, so we hit the DB size limit error, and MDBX tries to - // kick long-lived readers and delete their static_files + // kick long-lived readers and delete their snapshots { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index e6b2697a8..843ae161c 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -25,7 +25,7 @@ pub enum SyncMode { /// /// [SyncMode::UtterlyNoSync] the [SyncMode::SafeNoSync] flag disable similarly flush system /// buffers to disk when committing a transaction. 
But there is a huge difference in how - /// are recycled the MVCC static_files corresponding to previous "steady" transactions (see + /// are recycled the MVCC snapshots corresponding to previous "steady" transactions (see /// below). /// /// With [crate::EnvironmentKind::WriteMap] the [SyncMode::SafeNoSync] instructs MDBX to use From 14d91c3ba0b043e6ab9e97ca9c76936bdb40f594 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 2 May 2024 15:58:17 +0200 Subject: [PATCH 165/250] fix: make discv4 packets adhere to eip-8 (#8039) --- Cargo.lock | 1 + crates/ethereum-forks/src/forkid.rs | 75 ++++++++++- crates/net/discv4/Cargo.toml | 8 +- crates/net/discv4/src/proto.rs | 199 +++++++++++++++++++++++++++- 4 files changed, 275 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0a831ae80..2c587c8c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6583,6 +6583,7 @@ name = "reth-discv4" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", + "assert_matches", "discv5", "enr", "generic-array", diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index 3be3e3ab8..ee4edb8bd 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -4,7 +4,7 @@ use crate::Head; use alloy_primitives::{hex, BlockNumber, B256}; -use alloy_rlp::*; +use alloy_rlp::{Error as RlpError, *}; #[cfg(any(test, feature = "arbitrary"))] use arbitrary::Arbitrary; use crc::*; @@ -116,19 +116,51 @@ pub struct ForkId { } /// Represents a forward-compatible ENR entry for including the forkid in a node record via -/// EIP-868. Forward compatibility is achieved by allowing trailing fields. +/// EIP-868. Forward compatibility is achieved via EIP-8. /// /// See: /// /// /// for how geth implements ForkId values and forward compatibility. 
-#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[rlp(trailing)] +#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable)] pub struct EnrForkIdEntry { /// The inner forkid pub fork_id: ForkId, } +impl Decodable for EnrForkIdEntry { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { fork_id: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + impl From for EnrForkIdEntry { fn from(fork_id: ForkId) -> Self { Self { fork_id } @@ -652,4 +684,39 @@ mod tests { assert!(fork_filter.set_head_priv(Head { number: b2, ..Default::default() }).is_some()); assert_eq!(fork_filter.current(), h2); } + + mod eip8 { + use super::*; + + fn junk_enr_fork_id_entry() -> Vec { + let mut buf = Vec::new(); + // enr request is just an expiration + let fork_id = ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE }; + + // add some junk + let junk: u64 = 112233; + + // rlp header encoding + let payload_length = fork_id.length() + junk.length(); + alloy_rlp::Header { list: true, payload_length }.encode(&mut buf); + + // fields + fork_id.encode(&mut buf); + junk.encode(&mut buf); + + buf + } + + #[test] + fn eip8_decode_enr_fork_id_entry() { + let enr_fork_id_entry_with_junk = 
junk_enr_fork_id_entry(); + + let mut buf = enr_fork_id_entry_with_junk.as_slice(); + let decoded = EnrForkIdEntry::decode(&mut buf).unwrap(); + assert_eq!( + decoded.fork_id, + ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE } + ); + } + } } diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index bd7e99ee6..49e9b4ecc 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -21,7 +21,12 @@ reth-network-types.workspace = true # ethereum alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", + "serde", +] } enr.workspace = true # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } @@ -36,6 +41,7 @@ generic-array = "0.14" serde = { workspace = true, optional = true } [dev-dependencies] +assert_matches.workspace = true rand.workspace = true tokio = { workspace = true, features = ["macros"] } reth-tracing.workspace = true diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index da84dc05a..62dd9235d 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -215,7 +215,7 @@ impl NodeEndpoint { } /// A [FindNode packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#findnode-packet-0x03). -#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable)] pub struct FindNode { /// The target node's ID, a 64-byte secp256k1 public key. pub id: PeerId, @@ -223,8 +223,41 @@ pub struct FindNode { pub expire: u64, } +impl Decodable for FindNode { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. 
+ // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { id: Decodable::decode(b)?, expire: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + /// A [Neighbours packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#neighbors-packet-0x04). -#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable)] pub struct Neighbours { /// The list of nodes containing IP, UDP port, TCP port, and node ID. pub nodes: Vec, @@ -232,16 +265,82 @@ pub struct Neighbours { pub expire: u64, } +impl Decodable for Neighbours { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { nodes: Decodable::decode(b)?, expire: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. 
it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + /// A [ENRRequest packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrrequest-packet-0x05). /// /// This packet is used to request the current version of a node's Ethereum Node Record (ENR). -#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable)] pub struct EnrRequest { /// The expiration timestamp for the request. No reply should be sent if it refers to a time in /// the past. pub expire: u64, } +impl Decodable for EnrRequest { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { expire: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + /// A [ENRResponse packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrresponse-packet-0x06). 
/// /// This packet is used to respond to an ENRRequest packet and includes the requested ENR along with @@ -442,6 +541,7 @@ mod tests { test_utils::{rng_endpoint, rng_ipv4_record, rng_ipv6_record, rng_message}, DEFAULT_DISCOVERY_PORT, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; + use assert_matches::assert_matches; use enr::EnrPublicKey; use rand::{thread_rng, Rng, RngCore}; use reth_primitives::{hex, ForkHash}; @@ -769,4 +869,97 @@ mod tests { ); assert!(decoded_enr.verify()); } + + mod eip8 { + use super::*; + + fn junk_enr_request() -> Vec { + let mut buf = Vec::new(); + // enr request is just an expiration + let expire: u64 = 123456; + + // add some junk + let junk: u64 = 112233; + + // rlp header encoding + let payload_length = expire.length() + junk.length(); + alloy_rlp::Header { list: true, payload_length }.encode(&mut buf); + + // fields + expire.encode(&mut buf); + junk.encode(&mut buf); + + buf + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + #[test] + fn eip8_decode_enr_request() { + let enr_request_with_junk = junk_enr_request(); + + let mut buf = enr_request_with_junk.as_slice(); + let decoded = EnrRequest::decode(&mut buf).unwrap(); + assert_eq!(decoded.expire, 123456); + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + // + // test vector from eip-8: https://eips.ethereum.org/EIPS/eip-8 + #[test] + fn eip8_decode_findnode() { + let findnode_with_junk = hex!("c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260add7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396"); + + let buf = findnode_with_junk.as_slice(); + 
let decoded = Message::decode(buf).unwrap(); + + let expected_id = hex!("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"); + assert_matches!(decoded.msg, Message::FindNode(FindNode { id, expire: 1136239445 }) if id == expected_id); + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + // + // test vector from eip-8: https://eips.ethereum.org/EIPS/eip-8 + #[test] + fn eip8_decode_neighbours() { + let neighbours_with_junk = hex!("c679fc8fe0b8b12f06577f2e802d34f6fa257e6137a995f6f4cbfc9ee50ed3710faf6e66f932c4c8d81d64343f429651328758b47d3dbc02c4042f0fff6946a50f4a49037a72bb550f3a7872363a83e1b9ee6469856c24eb4ef80b7535bcf99c0004f9015bf90150f84d846321163782115c82115db8403155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32f84984010203040101b840312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069dbf8599020010db83c4d001500000000abcdef12820d05820d05b84038643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aacf8599020010db885a308d313198a2e037073488203e78203e8b8408dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df738443b9a355010203b525a138aa34383fec3d2719a0"); + + let buf = neighbours_with_junk.as_slice(); + let decoded = Message::decode(buf).unwrap(); + + let _ = NodeRecord { + address: "99.33.22.55".parse().unwrap(), + tcp_port: 4444, + udp_port: 4445, + id: hex!("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32").into(), + }.length(); + + let expected_nodes: Vec = vec![ + NodeRecord { + address: "99.33.22.55".parse().unwrap(), + tcp_port: 4444, + udp_port: 4445, + id: 
hex!("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32").into(), + }, + NodeRecord { + address: "1.2.3.4".parse().unwrap(), + tcp_port: 1, + udp_port: 1, + id: hex!("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db").into(), + }, + NodeRecord { + address: "2001:db8:3c4d:15::abcd:ef12".parse().unwrap(), + tcp_port: 3333, + udp_port: 3333, + id: hex!("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac").into(), + }, + NodeRecord { + address: "2001:db8:85a3:8d3:1319:8a2e:370:7348".parse().unwrap(), + tcp_port: 999, + udp_port: 1000, + id: hex!("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73").into(), + }, + ]; + assert_matches!(decoded.msg, Message::Neighbours(Neighbours { nodes, expire: 1136239445 }) if nodes == expected_nodes); + } + } } From 2eee1920ea658ccd93f1ebe0961f6ca72ec1f7e7 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Thu, 2 May 2024 16:10:40 +0200 Subject: [PATCH 166/250] fix: check for oob offset access in nippy jar (#8037) --- crates/storage/nippy-jar/src/cursor.rs | 4 ++-- crates/storage/nippy-jar/src/error.rs | 5 +++++ crates/storage/nippy-jar/src/lib.rs | 19 ++++++++++++------- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 984206c36..541fcfa63 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -213,13 +213,13 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { ) -> Result<(), NippyJarError> { // Find out the offset of the column value let offset_pos = self.row as usize * self.jar.columns + column; - let value_offset = self.reader.offset(offset_pos) as usize; + let value_offset 
= self.reader.offset(offset_pos)? as usize; let column_offset_range = if self.jar.rows * self.jar.columns == offset_pos + 1 { // It's the last column of the last row value_offset..self.reader.size() } else { - let next_value_offset = self.reader.offset(offset_pos + 1) as usize; + let next_value_offset = self.reader.offset(offset_pos + 1)? as usize; value_offset..next_value_offset }; diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index 3763be3dc..d44777058 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -42,6 +42,11 @@ pub enum NippyJarError { /// The read offset size in number of bytes. offset_size: u64, }, + #[error("attempted to read an out of bounds offset: {index}")] + OffsetOutOfBounds { + /// The index of the offset that was being read. + index: usize, + }, #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, #[error("dictionary is not loaded.")] diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 59fc586e4..1cecdba40 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -498,7 +498,7 @@ impl DataReader { } /// Returns the offset for the requested data index - pub fn offset(&self, index: usize) -> u64 { + pub fn offset(&self, index: usize) -> Result { // + 1 represents the offset_len u8 which is in the beginning of the file let from = index * self.offset_size as usize + 1; @@ -512,7 +512,7 @@ impl DataReader { if offsets_file_size > 1 { let from = offsets_file_size - self.offset_size as usize * (index + 1); - Ok(self.offset_at(from)) + self.offset_at(from) } else { Ok(0) } @@ -525,11 +525,16 @@ impl DataReader { } /// Reads one offset-sized (determined by the offset file) u64 at the provided index. 
- fn offset_at(&self, index: usize) -> u64 { + fn offset_at(&self, index: usize) -> Result { let mut buffer: [u8; 8] = [0; 8]; - buffer[..self.offset_size as usize] - .copy_from_slice(&self.offset_mmap[index..(index + self.offset_size as usize)]); - u64::from_le_bytes(buffer) + + let offset_end = index + self.offset_size as usize; + if offset_end > self.offset_mmap.len() { + return Err(NippyJarError::OffsetOutOfBounds { index }); + } + + buffer[..self.offset_size as usize].copy_from_slice(&self.offset_mmap[index..offset_end]); + Ok(u64::from_le_bytes(buffer)) } /// Returns number of bytes that represent one offset. @@ -1292,7 +1297,7 @@ mod tests { let data_reader = nippy.open_data_reader().unwrap(); // there are only two valid offsets. so index 2 actually represents the expected file // data size. - assert_eq!(data_reader.offset(2), expected_data_size as u64); + assert_eq!(data_reader.offset(2).unwrap(), expected_data_size as u64); } // This should prune from the ondisk offset list and clear the jar. From 78f62dd34c8c6c1b0a045990d4bf0d235a9d32b2 Mon Sep 17 00:00:00 2001 From: Daniel Ramirez Date: Thu, 2 May 2024 10:30:04 -0400 Subject: [PATCH 167/250] feat: add spawn_replay_transaction to EthTransactions (#8036) Co-authored-by: Oliver Nordbjerg --- crates/rpc/rpc/src/eth/api/transactions.rs | 55 ++++++++++++++++++++++ crates/rpc/rpc/src/otterscan.rs | 13 ++--- 2 files changed, 59 insertions(+), 9 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 15e2b6f56..51bde5bfa 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -376,6 +376,20 @@ pub trait EthTransactions: Send + Sync { .await } + /// Retrieves the transaction if it exists and returns its trace. + /// + /// Before the transaction is traced, all previous transaction in the block are applied to the + /// state by executing them first. 
+ /// The callback `f` is invoked with the [ResultAndState] after the transaction was executed and + /// the database that points to the beginning of the transaction. + /// + /// Note: Implementers should use a threadpool where blocking is allowed, such as + /// [BlockingTaskPool](reth_tasks::pool::BlockingTaskPool). + async fn spawn_replay_transaction(&self, hash: B256, f: F) -> EthResult> + where + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, + R: Send + 'static; + /// Retrieves the transaction if it exists and returns its trace. /// /// Before the transaction is traced, all previous transaction in the block are applied to the @@ -1173,6 +1187,47 @@ where Ok(block.map(|block| (transaction, block.seal(block_hash)))) } + async fn spawn_replay_transaction(&self, hash: B256, f: F) -> EthResult> + where + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, + R: Send + 'static, + { + let (transaction, block) = match self.transaction_and_block(hash).await? 
{ + None => return Ok(None), + Some(res) => res, + }; + let (tx, tx_info) = transaction.split(); + + let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?; + + // we need to get the state of the parent block because we're essentially replaying the + // block the transaction is included in + let parent_block = block.parent_hash; + let block_txs = block.into_transactions_ecrecovered(); + + let this = self.clone(); + self.spawn_with_state_at_block(parent_block.into(), move |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // replay all transactions prior to the targeted transaction + this.replay_transactions_until( + &mut db, + cfg.clone(), + block_env.clone(), + block_txs, + tx.hash, + )?; + + let env = + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, tx_env_with_recovered(&tx)); + + let (res, _) = this.transact(&mut db, env)?; + f(tx_info, res, db) + }) + .await + .map(Some) + } + async fn spawn_trace_transaction_in_block_with_inspector( &self, hash: B256, diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index bdfbc1293..2f62e66a3 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,7 +1,6 @@ use alloy_primitives::Bytes; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use revm::inspectors::NoOpInspector; use revm_inspectors::transfer::{TransferInspector, TransferKind}; use revm_primitives::ExecutionResult; @@ -81,14 +80,10 @@ where async fn get_transaction_error(&self, tx_hash: TxHash) -> RpcResult> { let maybe_revert = self .eth - .spawn_trace_transaction_in_block_with_inspector( - tx_hash, - NoOpInspector, - |_tx_info, _inspector, res, _| match res.result { - ExecutionResult::Revert { output, .. } => Ok(Some(output)), - _ => Ok(None), - }, - ) + .spawn_replay_transaction(tx_hash, |_tx_info, res, _| match res.result { + ExecutionResult::Revert { output, .. 
} => Ok(Some(output)), + _ => Ok(None), + }) .await .map(Option::flatten)?; Ok(maybe_revert) From 1603113ce5c0a99f9af6f5a42d0d11a2d64a0d4f Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 2 May 2024 15:54:34 +0100 Subject: [PATCH 168/250] docs(libmdbx): more `static_files` -> `snapshots` (#8047) --- crates/storage/libmdbx-rs/src/environment.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 03afb4784..ba7385b94 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -950,8 +950,7 @@ mod tests { .open(tempdir.path()) .unwrap(); - // Insert some data in the database, so the read transaction can lock on the static file of - // it + // Insert some data in the database, so the read transaction can lock on the snapshot of it { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); @@ -964,8 +963,7 @@ mod tests { // Create a read transaction let _tx_ro = env.begin_ro_txn().unwrap(); - // Change previously inserted data, so the read transaction would use the previous static - // file + // Change previously inserted data, so the read transaction would use the previous snapshot { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); From e68ab2f58c93259f4e0c25411bf36d2b3db3cba0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 17:17:28 +0200 Subject: [PATCH 169/250] refactor: engine interceptors (#8048) --- Cargo.lock | 3 + .../src/commands/debug_cmd/replay_engine.rs | 11 ++- crates/consensus/beacon/src/engine/mod.rs | 32 ++++----- crates/node-core/Cargo.toml | 2 + .../engine_store.rs} | 62 +++++++++++----- crates/node-core/src/engine/mod.rs | 71 +++++++++++++++++++ crates/node-core/src/engine/skip_fcu.rs | 64 +++++++++++++++++ crates/node-core/src/engine_skip_fcu.rs | 55 -------------- crates/node-core/src/lib.rs | 3 +- 
crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/launch/mod.rs | 43 ++++------- 11 files changed, 217 insertions(+), 130 deletions(-) rename crates/node-core/src/{engine_api_store.rs => engine/engine_store.rs} (78%) create mode 100644 crates/node-core/src/engine/mod.rs create mode 100644 crates/node-core/src/engine/skip_fcu.rs delete mode 100644 crates/node-core/src/engine_skip_fcu.rs diff --git a/Cargo.lock b/Cargo.lock index 2c587c8c7..fcb0dfa5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7196,6 +7196,7 @@ dependencies = [ "reth-transaction-pool", "tempfile", "tokio", + "tokio-stream", ] [[package]] @@ -7218,6 +7219,7 @@ dependencies = [ "metrics-process", "metrics-util", "once_cell", + "pin-project", "procfs", "proptest", "rand 0.8.5", @@ -7256,6 +7258,7 @@ dependencies = [ "thiserror", "tikv-jemalloc-ctl", "tokio", + "tokio-util", "tracing", "vergen", ] diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 1360c2f1b..947c12745 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -19,7 +19,7 @@ use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_core::engine_api_store::{EngineApiStore, StoredEngineApiMessage}; +use reth_node_core::engine::engine_store::{EngineMessageStore, StoredEngineApiMessage}; #[cfg(not(feature = "optimism"))] use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; @@ -34,7 +34,7 @@ use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_transaction_pool::noop::NoopTransactionPool; use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::oneshot; use tracing::*; /// `reth debug replay-engine` command @@ -191,8 +191,7 @@ impl Command { // 
Configure the consensus engine let network_client = network.fetch_client().await?; - let (consensus_engine_tx, consensus_engine_rx) = mpsc::unbounded_channel(); - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( + let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::new( network_client, Pipeline::builder().build( provider_factory.clone(), @@ -210,8 +209,6 @@ impl Command { payload_builder, None, u64::MAX, - consensus_engine_tx, - consensus_engine_rx, EngineHooks::new(), )?; info!(target: "reth::cli", "Consensus engine initialized"); @@ -224,7 +221,7 @@ impl Command { let _ = tx.send(res); }); - let engine_api_store = EngineApiStore::new(self.engine_api_store.clone()); + let engine_api_store = EngineMessageStore::new(self.engine_api_store.clone()); for filepath in engine_api_store.engine_messages_iter()? { let contents = fs::read(&filepath).wrap_err(format!("failed to read: {}", filepath.display()))?; diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 839bb0278..a7761615c 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,18 +1,10 @@ -use crate::{ - engine::{ - forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}, - metrics::EngineMetrics, - }, - hooks::{EngineHookContext, EngineHooksController}, - sync::{EngineSyncController, EngineSyncEvent}, -}; -use futures::{Future, StreamExt}; +use futures::{stream::BoxStream, Future, StreamExt}; use reth_db::database::Database; use reth_engine_primitives::{EngineTypes, PayloadAttributes, PayloadBuilderAttributes}; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockStatus, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, + BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }, executor::BlockValidationError, 
p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, @@ -21,6 +13,7 @@ use reth_interfaces::{ RethError, RethResult, }; use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ constants::EPOCH_SLOTS, stage::StageId, BlockNumHash, BlockNumber, Head, Header, SealedBlock, SealedHeader, B256, @@ -43,7 +36,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{ - mpsc::{self, UnboundedReceiver, UnboundedSender}, + mpsc::{self, UnboundedSender}, oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -68,18 +61,19 @@ mod handle; pub use handle::BeaconConsensusEngineHandle; mod forkchoice; -use crate::hooks::{EngineHookEvent, EngineHooks, PolledHook}; pub use forkchoice::ForkchoiceStatus; -use reth_interfaces::blockchain_tree::BlockValidationKind; -use reth_payload_validator::ExecutionPayloadValidator; +use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}; mod metrics; +use metrics::EngineMetrics; pub(crate) mod sync; +use sync::{EngineSyncController, EngineSyncEvent}; /// Hooks for running during the main loop of /// [consensus engine][`crate::engine::BeaconConsensusEngine`]. pub mod hooks; +use hooks::{EngineHookContext, EngineHookEvent, EngineHooks, EngineHooksController, PolledHook}; #[cfg(test)] pub mod test_utils; @@ -180,7 +174,7 @@ where /// Used for emitting updates about whether the engine is syncing or not. sync_state_updater: Box, /// The Engine API message receiver. - engine_message_rx: UnboundedReceiverStream>, + engine_message_stream: BoxStream<'static, BeaconEngineMessage>, /// A clone of the handle handle: BeaconConsensusEngineHandle, /// Tracks the received forkchoice state updates received by the CL. 
@@ -254,7 +248,7 @@ where target, pipeline_run_threshold, to_engine, - rx, + Box::pin(UnboundedReceiverStream::from(rx)), hooks, ) } @@ -284,7 +278,7 @@ where target: Option, pipeline_run_threshold: u64, to_engine: UnboundedSender>, - rx: UnboundedReceiver>, + engine_message_stream: BoxStream<'static, BeaconEngineMessage>, hooks: EngineHooks, ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { let handle = BeaconConsensusEngineHandle { to_engine }; @@ -303,7 +297,7 @@ where payload_validator: ExecutionPayloadValidator::new(blockchain.chain_spec()), blockchain, sync_state_updater, - engine_message_rx: UnboundedReceiverStream::new(rx), + engine_message_stream, handle: handle.clone(), forkchoice_state_tracker: Default::default(), payload_builder, @@ -1770,7 +1764,7 @@ where // // These messages can affect the state of the SyncController and they're also time // sensitive, hence they are polled first. - if let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) { + if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { this.on_forkchoice_updated(state, payload_attrs, tx); diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index 3caf5d9d1..e19b4d242 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -44,6 +44,8 @@ discv5.workspace = true # async tokio.workspace = true +tokio-util.workspace = true +pin-project.workspace = true # metrics metrics-exporter-prometheus = "0.12.1" diff --git a/crates/node-core/src/engine_api_store.rs b/crates/node-core/src/engine/engine_store.rs similarity index 78% rename from crates/node-core/src/engine_api_store.rs rename to crates/node-core/src/engine/engine_store.rs index 5552137f6..524e2c89b 100644 --- a/crates/node-core/src/engine_api_store.rs +++ b/crates/node-core/src/engine/engine_store.rs @@ -1,5 +1,6 @@ //! 
Stores engine API messages to disk for later inspection and replay. +use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; use reth_primitives::fs; @@ -8,8 +9,13 @@ use reth_rpc_types::{ ExecutionPayload, }; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, path::PathBuf, time::SystemTime}; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; +use std::{ + collections::BTreeMap, + path::PathBuf, + pin::Pin, + task::{ready, Context, Poll}, + time::SystemTime, +}; use tracing::*; /// A message from the engine API that has been stored to disk. @@ -34,13 +40,13 @@ pub enum StoredEngineApiMessage { /// This can read and write engine API messages in a specific directory. #[derive(Debug)] -pub struct EngineApiStore { +pub struct EngineMessageStore { /// The path to the directory that stores the engine API messages. path: PathBuf, } -impl EngineApiStore { - /// Creates a new [EngineApiStore] at the given path. +impl EngineMessageStore { + /// Creates a new [EngineMessageStore] at the given path. /// /// The path is expected to be a directory, where individual message JSON files will be stored. pub fn new(path: PathBuf) -> Self { @@ -108,22 +114,42 @@ impl EngineApiStore { } Ok(filenames_by_ts.into_iter().flat_map(|(_, paths)| paths)) } +} - /// Intercepts an incoming engine API message, storing it to disk and forwarding it to the - /// engine channel. - pub async fn intercept( - self, - mut rx: UnboundedReceiver>, - to_engine: UnboundedSender>, - ) where - Engine: EngineTypes, - BeaconEngineMessage: std::fmt::Debug, - { - while let Some(msg) = rx.recv().await { - if let Err(error) = self.on_message(&msg, SystemTime::now()) { +/// A wrapper stream that stores Engine API messages in +/// the specified directory. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineStoreStream { + /// Inner message stream. + #[pin] + stream: S, + /// Engine message store. 
+ store: EngineMessageStore, +} + +impl EngineStoreStream { + /// Create new engine store stream wrapper. + pub fn new(stream: S, path: PathBuf) -> Self { + Self { stream, store: EngineMessageStore::new(path) } + } +} + +impl Stream for EngineStoreStream +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + let next = ready!(this.stream.poll_next_unpin(cx)); + if let Some(msg) = &next { + if let Err(error) = this.store.on_message(msg, SystemTime::now()) { error!(target: "engine::intercept", ?msg, %error, "Error handling Engine API message"); } - let _ = to_engine.send(msg); } + Poll::Ready(next) } } diff --git a/crates/node-core/src/engine/mod.rs b/crates/node-core/src/engine/mod.rs new file mode 100644 index 000000000..4ba8479e9 --- /dev/null +++ b/crates/node-core/src/engine/mod.rs @@ -0,0 +1,71 @@ +//! Collection of various stream utilities for consensus engine. + +use futures::Stream; +use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use std::path::PathBuf; +use tokio_util::either::Either; + +pub mod engine_store; +use engine_store::EngineStoreStream; + +pub mod skip_fcu; +use skip_fcu::EngineSkipFcu; + +/// The collection of stream extensions for engine API message stream. +pub trait EngineMessageStreamExt: + Stream> +{ + /// Skips the specified number of [BeaconEngineMessage::ForkchoiceUpdated] messages from the + /// engine message stream. + fn skip_fcu(self, count: usize) -> EngineSkipFcu + where + Self: Sized, + { + EngineSkipFcu::new(self, count) + } + + /// If the count is [Some], returns the stream that skips the specified number of + /// [BeaconEngineMessage::ForkchoiceUpdated] messages. Otherwise, returns `Self`. 
+ fn maybe_skip_fcu(self, maybe_count: Option) -> Either, Self> + where + Self: Sized, + { + if let Some(count) = maybe_count { + Either::Left(self.skip_fcu(count)) + } else { + Either::Right(self) + } + } + + /// Stores engine messages at the specified location. + fn store_messages(self, path: PathBuf) -> EngineStoreStream + where + Self: Sized, + { + EngineStoreStream::new(self, path) + } + + /// If the path is [Some], returns the stream that stores engine messages at the specified + /// location. Otherwise, returns `Self`. + fn maybe_store_messages( + self, + maybe_path: Option, + ) -> Either, Self> + where + Self: Sized, + { + if let Some(path) = maybe_path { + Either::Left(self.store_messages(path)) + } else { + Either::Right(self) + } + } +} + +impl EngineMessageStreamExt for T +where + Engine: EngineTypes, + T: Stream>, +{ +} diff --git a/crates/node-core/src/engine/skip_fcu.rs b/crates/node-core/src/engine/skip_fcu.rs new file mode 100644 index 000000000..34004134f --- /dev/null +++ b/crates/node-core/src/engine/skip_fcu.rs @@ -0,0 +1,64 @@ +//! Stores engine API messages to disk for later inspection and replay. + +use futures::{Stream, StreamExt}; +use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; +use reth_engine_primitives::EngineTypes; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; + +/// Engine API stream wrapper that skips the specified number of forkchoice updated messages. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineSkipFcu { + #[pin] + stream: S, + /// The number of FCUs to skip. + threshold: usize, + /// Current count of skipped FCUs. + skipped: usize, +} + +impl EngineSkipFcu { + /// Creates new [EngineSkipFcu] stream wrapper. + pub fn new(stream: S, threshold: usize) -> Self { + Self { + stream, + threshold, + // Start with `threshold` so that the first FCU goes through. 
+ skipped: threshold, + } + } +} + +impl Stream for EngineSkipFcu +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + + loop { + let next = ready!(this.stream.poll_next_unpin(cx)); + let item = match next { + Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) => { + if this.skipped < this.threshold { + *this.skipped += 1; + tracing::warn!(target: "engine::intercept", ?state, ?payload_attrs, threshold=this.threshold, skipped=this.skipped, "Skipping FCU"); + let _ = tx.send(Ok(OnForkChoiceUpdated::syncing())); + continue + } else { + *this.skipped = 0; + Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + } + } + next => next, + }; + return Poll::Ready(item) + } + } +} diff --git a/crates/node-core/src/engine_skip_fcu.rs b/crates/node-core/src/engine_skip_fcu.rs deleted file mode 100644 index a6e5e1b01..000000000 --- a/crates/node-core/src/engine_skip_fcu.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! Stores engine API messages to disk for later inspection and replay. - -use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; - -/// Intercept Engine API message and skip FCUs. -#[derive(Debug)] -pub struct EngineApiSkipFcu { - /// The number of FCUs to skip. - threshold: usize, - /// Current count of skipped FCUs. - skipped: usize, -} - -impl EngineApiSkipFcu { - /// Creates new [EngineApiSkipFcu] interceptor. - pub fn new(threshold: usize) -> Self { - Self { - threshold, - // Start with `threshold` so that the first FCU goes through. - skipped: threshold, - } - } - - /// Intercepts an incoming engine API message, skips FCU or forwards it - /// to the engine depending on current number of skipped FCUs. 
- pub async fn intercept( - mut self, - mut rx: UnboundedReceiver>, - to_engine: UnboundedSender>, - ) where - Engine: EngineTypes, - BeaconEngineMessage: std::fmt::Debug, - { - while let Some(msg) = rx.recv().await { - if let BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } = msg { - if self.skipped < self.threshold { - self.skipped += 1; - tracing::warn!(target: "engine::intercept", ?state, ?payload_attrs, threshold=self.threshold, skipped=self.skipped, "Skipping FCU"); - let _ = tx.send(Ok(OnForkChoiceUpdated::syncing())); - } else { - self.skipped = 0; - let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs, - tx, - }); - } - } else { - let _ = to_engine.send(msg); - } - } - } -} diff --git a/crates/node-core/src/lib.rs b/crates/node-core/src/lib.rs index 3d73e0e61..024467ab1 100644 --- a/crates/node-core/src/lib.rs +++ b/crates/node-core/src/lib.rs @@ -11,8 +11,7 @@ pub mod args; pub mod cli; pub mod dirs; -pub mod engine_api_store; -pub mod engine_skip_fcu; +pub mod engine; pub mod exit; pub mod init; pub mod metrics; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index ef671f127..136c27d7c 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -47,6 +47,7 @@ tokio = { workspace = true, features = [ "time", "rt-multi-thread", ] } +tokio-stream.workspace = true ## misc aquamarine.workspace = true diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index bd81f8386..a372bedf0 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -14,7 +14,8 @@ use reth_beacon_consensus::{ BeaconConsensus, BeaconConsensusEngine, }; use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, + noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, + TreeExternals, }; use reth_consensus::Consensus; use 
reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; @@ -23,8 +24,7 @@ use reth_network::NetworkEvents; use reth_node_api::{FullNodeComponents, FullNodeTypes}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, - engine_api_store::EngineApiStore, - engine_skip_fcu::EngineApiSkipFcu, + engine::EngineMessageStreamExt, exit::NodeExitFuture, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; @@ -37,10 +37,10 @@ use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; use std::{future::Future, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; +use tokio_stream::wrappers::UnboundedReceiverStream; pub mod common; pub use common::LaunchContext; -use reth_blockchain_tree::noop::NoopBlockchainTree; /// A general purpose trait that launches a new node of any kind. /// @@ -261,29 +261,15 @@ where // create pipeline let network_client = node_adapter.network().fetch_client().await?; - let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); + let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel(); - if let Some(skip_fcu_threshold) = ctx.node_config().debug.skip_fcu { - debug!(target: "reth::cli", "spawning skip FCU task"); - let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); - let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); - ctx.task_executor().spawn_critical( - "skip FCU interceptor", - engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), - ); - consensus_engine_rx = skip_fcu_rx; - } - - if let Some(store_path) = ctx.node_config().debug.engine_api_store.clone() { - debug!(target: "reth::cli", "spawning engine API store"); - let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); - let engine_api_store = EngineApiStore::new(store_path); - ctx.task_executor().spawn_critical( - "engine api interceptor", - engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), - ); - consensus_engine_rx = engine_intercept_rx; - }; + 
let node_config = ctx.node_config(); + let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx) + .maybe_skip_fcu(node_config.debug.skip_fcu) + // Store messages _after_ skipping messages so that `replay-engine` command + // would replay the exact same messages that were observed by the engine + // during this run. + .maybe_store_messages(node_config.debug.engine_api_store.clone()); let max_block = ctx.max_block(network_client.clone()).await?; let mut hooks = EngineHooks::new(); @@ -303,8 +289,7 @@ where info!(target: "reth::cli", "Starting Reth in dev mode"); for (idx, (address, alloc)) in ctx.chain_spec().genesis.alloc.iter().enumerate() { - info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, -address.to_string(), format_ether(alloc.balance)); + info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); } // install auto-seal @@ -395,7 +380,7 @@ address.to_string(), format_ether(alloc.balance)); initial_target, reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, consensus_engine_tx, - consensus_engine_rx, + Box::pin(consensus_engine_stream), hooks, )?; info!(target: "reth::cli", "Consensus engine initialized"); From 2af2f0ba46ff9f1324f755bb50f24a2604445e55 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 17:22:23 +0200 Subject: [PATCH 170/250] chore(engine): new payload blockchain tree action (#8041) --- crates/consensus/beacon/src/engine/mod.rs | 193 +++++++++++++--------- 1 file changed, 114 insertions(+), 79 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index a7761615c..e8b27dc57 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -504,8 +504,10 @@ where // or cannot be processed at the moment. 
self.on_forkchoice_updated_status(state, on_updated, tx); } else { - self.blockchain_tree_action = - Some(BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx }); + let previous_action = self + .blockchain_tree_action + .replace(BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx }); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); } } @@ -1030,13 +1032,17 @@ where &mut self, payload: ExecutionPayload, cancun_fields: Option, - ) -> Result { + tx: oneshot::Sender>, + ) { + self.metrics.new_payload_messages.increment(1); + let block = match self.ensure_well_formed_payload(payload, cancun_fields) { Ok(block) => block, - Err(status) => return Ok(status), + Err(status) => { + let _ = tx.send(Ok(status)); + return + } }; - let block_hash = block.hash(); - let block_num_hash = block.num_hash(); let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block.hash()); if lowest_buffered_ancestor == block.hash() { @@ -1047,74 +1053,14 @@ where if let Some(status) = self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.hash()) { - return Ok(status) + let _ = tx.send(Ok(status)); + return } - let res = if self.sync.is_pipeline_idle() { - // we can only insert new payloads if the pipeline is _not_ running, because it holds - // exclusive access to the database - self.try_insert_new_payload(block) - } else { - self.try_buffer_payload(block) - }; - - let status = match res { - Ok(status) => { - if status.is_valid() { - if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { - // if we're currently syncing and the inserted block is the targeted FCU - // head block, we can try to make it canonical. 
- if block_hash == target.head_block_hash { - if let Err((_hash, error)) = - self.try_make_sync_target_canonical(block_num_hash) - { - return if error.is_fatal() { - error!(target: "consensus::engine", %error, "Encountered fatal error"); - Err(BeaconOnNewPayloadError::Internal(Box::new(error))) - } else { - // If we could not make the sync target block canonical, we - // should return the error as an invalid payload status. - Ok(PayloadStatus::new( - PayloadStatusEnum::Invalid { - validation_error: error.to_string(), - }, - // TODO: return a proper latest valid hash - // - // See: - self.forkchoice_state_tracker.last_valid_head(), - )) - } - } - } - } - // block was successfully inserted, so we can cancel the full block request, if - // any exists - self.sync.cancel_full_block_request(block_hash); - } - Ok(status) - } - Err(error) => { - warn!(target: "consensus::engine", %error, "Error while processing payload"); - - // If the error was due to an invalid payload, the payload is added to the invalid - // headers cache and `Ok` with [PayloadStatusEnum::Invalid] is returned. 
- let (block, error) = error.split(); - if error.is_invalid_block() { - warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); - let latest_valid_hash = - self.latest_valid_hash_for_invalid_payload(block.parent_hash, Some(&error)); - // keep track of the invalid header - self.invalid_headers.insert(block.header); - let status = PayloadStatusEnum::Invalid { validation_error: error.to_string() }; - Ok(PayloadStatus::new(status, latest_valid_hash)) - } else { - Err(BeaconOnNewPayloadError::Internal(Box::new(error))) - } - } - }; - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - status + let previous_action = self + .blockchain_tree_action + .replace(BlockchainTreeAction::InsertNewPayload { block, tx }); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); } /// Ensures that the given payload does not violate any consensus rules that concern the block's @@ -1670,7 +1616,9 @@ where Ok(()) } - /// Process the outcome of blockchain tree action. + /// Process the next set blockchain tree action. + /// The handler might set next blockchain tree action to perform, + /// so the state change should be handled accordingly. 
fn on_blockchain_tree_action( &mut self, action: BlockchainTreeAction, @@ -1705,6 +1653,84 @@ where } }; } + BlockchainTreeAction::InsertNewPayload { block, tx } => { + let block_hash = block.hash(); + let block_num_hash = block.num_hash(); + let result = if self.sync.is_pipeline_idle() { + // we can only insert new payloads if the pipeline is _not_ running, because it + // holds exclusive access to the database + self.try_insert_new_payload(block) + } else { + self.try_buffer_payload(block) + }; + + let status = match result { + Ok(status) => status, + Err(error) => { + warn!(target: "consensus::engine", %error, "Error while processing payload"); + + let (block, error) = error.split(); + if !error.is_invalid_block() { + // TODO: revise if any error should be considered fatal at this point. + let _ = + tx.send(Err(BeaconOnNewPayloadError::Internal(Box::new(error)))); + return Ok(EngineEventOutcome::Processed) + } + + // If the error was due to an invalid payload, the payload is added to the + // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is + // returned. + warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); + let latest_valid_hash = self + .latest_valid_hash_for_invalid_payload(block.parent_hash, Some(&error)); + // keep track of the invalid header + self.invalid_headers.insert(block.header); + PayloadStatus::new( + PayloadStatusEnum::Invalid { validation_error: error.to_string() }, + latest_valid_hash, + ) + } + }; + + if status.is_valid() { + if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { + // if we're currently syncing and the inserted block is the targeted + // FCU head block, we can try to make it canonical. 
+ if block_hash == target.head_block_hash { + if let Err((_hash, error)) = + self.try_make_sync_target_canonical(block_num_hash) + { + if error.is_fatal() { + let response = Err(BeaconOnNewPayloadError::Internal( + Box::new(error.clone()), + )); + let _ = tx.send(response); + return Err(RethError::Canonical(error)) + } + + // If we could not make the sync target block canonical, + // we should return the error as an invalid payload status. + let status = Ok(PayloadStatus::new( + PayloadStatusEnum::Invalid { + validation_error: error.to_string(), + }, + // TODO: return a proper latest valid hash + // See: + self.forkchoice_state_tracker.last_valid_head(), + )); + let _ = tx.send(status); + return Ok(EngineEventOutcome::Processed) + } + } + } + // block was successfully inserted, so we can cancel the full block + // request, if any exists + self.sync.cancel_full_block_request(block_hash); + } + + trace!(target: "consensus::engine", ?status, "Returning payload status"); + let _ = tx.send(Ok(status)); + } }; Ok(EngineEventOutcome::Processed) } @@ -1753,10 +1779,17 @@ where // Process any blockchain tree action result as set forth during engine message // processing. if let Some(action) = this.blockchain_tree_action.take() { - match this.on_blockchain_tree_action(action)? { - EngineEventOutcome::Processed => {} - EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), + match this.on_blockchain_tree_action(action) { + Ok(EngineEventOutcome::Processed) => {} + Ok(EngineEventOutcome::ReachedMaxBlock) => return Poll::Ready(Ok(())), + Err(error) => { + error!(target: "consensus::engine", %error, "Encountered fatal error"); + return Poll::Ready(Err(error.into())) + } }; + + // Blockchain tree action handler might set next action to take. + continue } // Process one incoming message from the CL. 
We don't drain the messages right away, @@ -1770,9 +1803,7 @@ where this.on_forkchoice_updated(state, payload_attrs, tx); } BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - this.metrics.new_payload_messages.increment(1); - let res = this.on_new_payload(payload, cancun_fields); - let _ = tx.send(res); + this.on_new_payload(payload, cancun_fields, tx); } BeaconEngineMessage::TransitionConfigurationExchanged => { this.blockchain.on_transition_configuration_exchanged(); @@ -1833,6 +1864,10 @@ enum BlockchainTreeAction { attrs: Option, tx: oneshot::Sender>, }, + InsertNewPayload { + block: SealedBlock, + tx: oneshot::Sender>, + }, } /// Represents outcomes of processing an engine event From 7845c9c897ff4781c33067f646c1237fdece49aa Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Thu, 2 May 2024 21:39:00 +0530 Subject: [PATCH 171/250] minor typo fix (#8052) Co-authored-by: Matthias Seitz --- crates/net/network/tests/it/multiplex.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/tests/it/multiplex.rs b/crates/net/network/tests/it/multiplex.rs index aac55a982..650b75423 100644 --- a/crates/net/network/tests/it/multiplex.rs +++ b/crates/net/network/tests/it/multiplex.rs @@ -22,7 +22,7 @@ use std::{ use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -/// A simple Rplx subprotocol for +/// A simple Rlpx subprotocol that sends pings and pongs mod proto { use super::*; use reth_eth_wire::capability::Capability; From 5378dd79e226172cd96c60b5e438f051cdc2a469 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 19:01:32 +0200 Subject: [PATCH 172/250] feat: new payload skipper (#8050) --- crates/node-core/src/args/debug.rs | 4 ++ crates/node-core/src/engine/mod.rs | 28 +++++++++ crates/node-core/src/engine/skip_fcu.rs | 2 +- .../node-core/src/engine/skip_new_payload.rs | 60 +++++++++++++++++++ crates/node/builder/src/launch/mod.rs 
| 5 +- 5 files changed, 96 insertions(+), 3 deletions(-) create mode 100644 crates/node-core/src/engine/skip_new_payload.rs diff --git a/crates/node-core/src/args/debug.rs b/crates/node-core/src/args/debug.rs index 916b4a1ef..3eda71ad0 100644 --- a/crates/node-core/src/args/debug.rs +++ b/crates/node-core/src/args/debug.rs @@ -63,6 +63,10 @@ pub struct DebugArgs { #[arg(long = "debug.skip-fcu", help_heading = "Debug")] pub skip_fcu: Option, + /// If provided, the engine will skip `n` consecutive new payloads. + #[arg(long = "debug.skip-new-payload", help_heading = "Debug")] + pub skip_new_payload: Option, + /// The path to store engine API messages at. /// If specified, all of the intercepted engine API messages /// will be written to specified location. diff --git a/crates/node-core/src/engine/mod.rs b/crates/node-core/src/engine/mod.rs index 4ba8479e9..2c4e12e68 100644 --- a/crates/node-core/src/engine/mod.rs +++ b/crates/node-core/src/engine/mod.rs @@ -12,6 +12,9 @@ use engine_store::EngineStoreStream; pub mod skip_fcu; use skip_fcu::EngineSkipFcu; +pub mod skip_new_payload; +use skip_new_payload::EngineSkipNewPayload; + /// The collection of stream extensions for engine API message stream. pub trait EngineMessageStreamExt: Stream> @@ -38,6 +41,31 @@ pub trait EngineMessageStreamExt: } } + /// Skips the specified number of [BeaconEngineMessage::NewPayload] messages from the + /// engine message stream. + fn skip_new_payload(self, count: usize) -> EngineSkipNewPayload + where + Self: Sized, + { + EngineSkipNewPayload::new(self, count) + } + + /// If the count is [Some], returns the stream that skips the specified number of + /// [BeaconEngineMessage::NewPayload] messages. Otherwise, returns `Self`. 
+ fn maybe_skip_new_payload( + self, + maybe_count: Option, + ) -> Either, Self> + where + Self: Sized, + { + if let Some(count) = maybe_count { + Either::Left(self.skip_new_payload(count)) + } else { + Either::Right(self) + } + } + /// Stores engine messages at the specified location. fn store_messages(self, path: PathBuf) -> EngineStoreStream where diff --git a/crates/node-core/src/engine/skip_fcu.rs b/crates/node-core/src/engine/skip_fcu.rs index 34004134f..6deb34263 100644 --- a/crates/node-core/src/engine/skip_fcu.rs +++ b/crates/node-core/src/engine/skip_fcu.rs @@ -1,4 +1,4 @@ -//! Stores engine API messages to disk for later inspection and replay. +//! Stream wrapper that skips specified number of FCUs. use futures::{Stream, StreamExt}; use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; diff --git a/crates/node-core/src/engine/skip_new_payload.rs b/crates/node-core/src/engine/skip_new_payload.rs new file mode 100644 index 000000000..ea5cf61e9 --- /dev/null +++ b/crates/node-core/src/engine/skip_new_payload.rs @@ -0,0 +1,60 @@ +//! Stream wrapper that skips specified number of new payload messages. + +use futures::{Stream, StreamExt}; +use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use reth_rpc_types::engine::{PayloadStatus, PayloadStatusEnum}; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; + +/// Engine API stream wrapper that skips the specified number of new payload messages. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineSkipNewPayload { + #[pin] + stream: S, + /// The number of messages to skip. + threshold: usize, + /// Current count of skipped messages. + skipped: usize, +} + +impl EngineSkipNewPayload { + /// Creates new [EngineSkipNewPayload] stream wrapper. 
+ pub fn new(stream: S, threshold: usize) -> Self { + Self { stream, threshold, skipped: 0 } + } +} + +impl Stream for EngineSkipNewPayload +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + + loop { + let next = ready!(this.stream.poll_next_unpin(cx)); + let item = match next { + Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) => { + if this.skipped < this.threshold { + *this.skipped += 1; + tracing::warn!(target: "engine::intercept", ?payload, ?cancun_fields, threshold=this.threshold, skipped=this.skipped, "Skipping new payload"); + let _ = tx.send(Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing))); + continue + } else { + *this.skipped = 0; + Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) + } + } + next => next, + }; + return Poll::Ready(item) + } + } +} diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index a372bedf0..cd93dbe5e 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -266,8 +266,9 @@ where let node_config = ctx.node_config(); let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx) .maybe_skip_fcu(node_config.debug.skip_fcu) - // Store messages _after_ skipping messages so that `replay-engine` command - // would replay the exact same messages that were observed by the engine + .maybe_skip_new_payload(node_config.debug.skip_new_payload) + // Store messages _after_ skipping so that `replay-engine` command + // would replay only the messages that were observed by the engine // during this run. 
.maybe_store_messages(node_config.debug.engine_api_store.clone()); From 0e3f031ada389dd740ed944e455de6f6a8c7e87a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 2 May 2024 19:01:45 +0200 Subject: [PATCH 173/250] feat(engine): make new payload canonical action (#8042) --- crates/consensus/beacon/src/engine/mod.rs | 69 ++++++++++++++--------- 1 file changed, 42 insertions(+), 27 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index e8b27dc57..f3aa249fa 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -506,7 +506,7 @@ where } else { let previous_action = self .blockchain_tree_action - .replace(BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx }); + .replace(BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }); debug_assert!(previous_action.is_none(), "Pre-existing action found"); } } @@ -1624,7 +1624,7 @@ where action: BlockchainTreeAction, ) -> RethResult { match action { - BlockchainTreeAction::FcuMakeCanonical { state, attrs, tx } => { + BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx } => { let start = Instant::now(); let result = self.blockchain.make_canonical(state.head_block_hash); let elapsed = self.record_make_canonical_latency(start, &result); @@ -1697,30 +1697,15 @@ where // if we're currently syncing and the inserted block is the targeted // FCU head block, we can try to make it canonical. if block_hash == target.head_block_hash { - if let Err((_hash, error)) = - self.try_make_sync_target_canonical(block_num_hash) - { - if error.is_fatal() { - let response = Err(BeaconOnNewPayloadError::Internal( - Box::new(error.clone()), - )); - let _ = tx.send(response); - return Err(RethError::Canonical(error)) - } - - // If we could not make the sync target block canonical, - // we should return the error as an invalid payload status. 
- let status = Ok(PayloadStatus::new( - PayloadStatusEnum::Invalid { - validation_error: error.to_string(), - }, - // TODO: return a proper latest valid hash - // See: - self.forkchoice_state_tracker.last_valid_head(), - )); - let _ = tx.send(status); - return Ok(EngineEventOutcome::Processed) - } + let previous_action = self.blockchain_tree_action.replace( + BlockchainTreeAction::MakeNewPayloadCanonical { + payload_num_hash: block_num_hash, + status, + tx, + }, + ); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); + return Ok(EngineEventOutcome::Processed) } } // block was successfully inserted, so we can cancel the full block @@ -1728,6 +1713,31 @@ where self.sync.cancel_full_block_request(block_hash); } + trace!(target: "consensus::engine", ?status, "Returning payload status"); + let _ = tx.send(Ok(status)); + } + BlockchainTreeAction::MakeNewPayloadCanonical { payload_num_hash, status, tx } => { + let status = match self.try_make_sync_target_canonical(payload_num_hash) { + Ok(()) => status, + Err((_hash, error)) => { + if error.is_fatal() { + let response = + Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); + let _ = tx.send(response); + return Err(RethError::Canonical(error)) + } + + // If we could not make the sync target block canonical, + // we should return the error as an invalid payload status. 
+ PayloadStatus::new( + PayloadStatusEnum::Invalid { validation_error: error.to_string() }, + // TODO: return a proper latest valid hash + // See: + self.forkchoice_state_tracker.last_valid_head(), + ) + } + }; + trace!(target: "consensus::engine", ?status, "Returning payload status"); let _ = tx.send(Ok(status)); } @@ -1859,7 +1869,7 @@ where } enum BlockchainTreeAction { - FcuMakeCanonical { + MakeForkchoiceHeadCanonical { state: ForkchoiceState, attrs: Option, tx: oneshot::Sender>, @@ -1868,6 +1878,11 @@ enum BlockchainTreeAction { block: SealedBlock, tx: oneshot::Sender>, }, + MakeNewPayloadCanonical { + payload_num_hash: BlockNumHash, + status: PayloadStatus, + tx: oneshot::Sender>, + }, } /// Represents outcomes of processing an engine event From 29be4072cb9a0e8d8a3999ca6d5b9e3efa96983e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 2 May 2024 19:24:19 +0200 Subject: [PATCH 174/250] fix(op): bootnodes (#7990) --- crates/primitives/src/lib.rs | 4 ++-- crates/primitives/src/net.rs | 36 +++++++++++++++--------------------- 2 files changed, 17 insertions(+), 23 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index ae20cf6b2..3473ef82e 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -144,8 +144,8 @@ mod optimism { pub use crate::{ chain::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}, net::{ - base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, BASE_BOOTNODES, - BASE_TESTNET_BOOTNODES, OP_BOOTNODES, OP_TESTNET_BOOTNODES, + base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, OP_BOOTNODES, + OP_TESTNET_BOOTNODES, }, transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}, }; diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index c8ff2a3cc..068e47e5b 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -43,24 +43,13 @@ pub static HOLESKY_BOOTNODES : [&str; 2] = [ ]; #[cfg(feature = "optimism")] -/// OP Mainnet Bootnodes -pub 
static OP_BOOTNODES: [&str; 3] = [ +/// OP stack mainnet boot nodes. +pub static OP_BOOTNODES: &[&str] = &[ + // OP Labs "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", -]; - -#[cfg(feature = "optimism")] -/// OP Testnet Bootnodes -pub static OP_TESTNET_BOOTNODES: [&str; 3] = [ - "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", - "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", - "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", -]; - -#[cfg(feature = "optimism")] -/// Base Mainnet Bootnodes -pub static BASE_BOOTNODES: [&str; 5] = [ + // Base "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", @@ -69,8 +58,13 @@ pub static BASE_BOOTNODES: [&str; 5] = [ ]; #[cfg(feature = "optimism")] -/// Base Testnet Bootnodes -pub static BASE_TESTNET_BOOTNODES: [&str; 2] = [ +/// OP stack testnet boot nodes. 
+pub static OP_TESTNET_BOOTNODES: &[&str] = &[ + // OP Labs + "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", + "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", + "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", + // Base "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", ]; @@ -98,25 +92,25 @@ pub fn holesky_nodes() -> Vec { #[cfg(feature = "optimism")] /// Returns parsed op-stack mainnet nodes pub fn op_nodes() -> Vec { - parse_nodes(&OP_BOOTNODES[..]) + parse_nodes(OP_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack testnet nodes pub fn op_testnet_nodes() -> Vec { - parse_nodes(&OP_TESTNET_BOOTNODES[..]) + parse_nodes(OP_TESTNET_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack base mainnet nodes pub fn base_nodes() -> Vec { - parse_nodes(&BASE_BOOTNODES[..]) + parse_nodes(OP_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack base testnet nodes pub fn base_testnet_nodes() -> Vec { - parse_nodes(&BASE_TESTNET_BOOTNODES[..]) + parse_nodes(OP_TESTNET_BOOTNODES) } /// Parses all the nodes From 9eb7d961d71757bad3efa6270c4d8c3b1d9c208d Mon Sep 17 00:00:00 2001 From: Darshan Kathiriya <8559992+lakshya-sky@users.noreply.github.com> Date: Thu, 2 May 2024 13:45:34 -0400 Subject: [PATCH 175/250] integrate FullNodeComponents in exexcomponents (#8051) --- Cargo.lock | 2 ++ crates/exex/Cargo.toml | 2 ++ 
crates/exex/src/context.rs | 50 +++++++++++++++++++++++---- crates/node/builder/src/launch/mod.rs | 4 +-- 4 files changed, 48 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fcb0dfa5e..ffb669d5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6910,8 +6910,10 @@ dependencies = [ "metrics", "reth-config", "reth-metrics", + "reth-network", "reth-node-api", "reth-node-core", + "reth-payload-builder", "reth-primitives", "reth-provider", "reth-tasks", diff --git a/crates/exex/Cargo.toml b/crates/exex/Cargo.toml index 71f9c8bde..d16cb53f7 100644 --- a/crates/exex/Cargo.toml +++ b/crates/exex/Cargo.toml @@ -21,6 +21,8 @@ reth-primitives.workspace = true reth-provider.workspace = true reth-tasks.workspace = true reth-tracing.workspace = true +reth-network.workspace = true +reth-payload-builder.workspace = true ## async tokio.workspace = true diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index df2b51377..733047400 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -1,4 +1,4 @@ -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, @@ -14,18 +14,12 @@ use crate::{ExExEvent, ExExNotification}; pub struct ExExContext { /// The current head of the blockchain at launch. pub head: Head, - /// The configured provider to interact with the blockchain. - pub provider: Node::Provider, - /// The task executor of the node. - pub task_executor: TaskExecutor, /// The data dir of the node. pub data_dir: ChainPath, /// The config of the node pub config: NodeConfig, /// The loaded node config pub reth_config: reth_config::Config, - /// The transaction pool of the node. - pub pool: Node::Pool, /// Channel used to send [`ExExEvent`]s to the rest of the node. 
/// /// # Important @@ -41,4 +35,46 @@ pub struct ExExContext { /// Once a an [`ExExNotification`] is sent over the channel, it is considered delivered by the /// node. pub notifications: Receiver, + + /// node components + pub components: Node, +} + +impl NodeTypes for ExExContext { + type Primitives = Node::Primitives; + type Engine = Node::Engine; +} + +impl FullNodeTypes for ExExContext { + type DB = Node::DB; + type Provider = Node::Provider; +} + +impl FullNodeComponents for ExExContext { + type Pool = Node::Pool; + type Evm = Node::Evm; + + fn pool(&self) -> &Self::Pool { + self.components.pool() + } + + fn provider(&self) -> &Self::Provider { + self.components.provider() + } + + fn network(&self) -> &reth_network::NetworkHandle { + self.components.network() + } + + fn payload_builder(&self) -> &reth_payload_builder::PayloadBuilderHandle { + self.components.payload_builder() + } + + fn task_executor(&self) -> &TaskExecutor { + self.components.task_executor() + } + + fn evm_config(&self) -> &Self::Evm { + self.components.evm_config() + } } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index cd93dbe5e..201965fa9 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -196,12 +196,10 @@ where // create the launch context for the exex let context = ExExContext { head, - provider: blockchain_db.clone(), - task_executor: ctx.task_executor().clone(), data_dir: ctx.data_dir().clone(), config: ctx.node_config().clone(), reth_config: ctx.toml_config().clone(), - pool: node_adapter.components.pool().clone(), + components: node_adapter.clone(), events, notifications, }; From e90dc44be9bb3e9aaacc742b3cfa9d22a8252b5c Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Fri, 3 May 2024 00:02:36 +0530 Subject: [PATCH 176/250] feat: remove field ```max_gas_limit``` from ```BasicPayloadJobGeneratorConfig``` (#7949) --- crates/node-ethereum/src/node.rs 
| 3 +-- crates/optimism/node/src/node.rs | 3 +-- crates/payload/basic/src/lib.rs | 13 +------------ examples/custom-engine-types/src/main.rs | 3 +-- examples/custom-payload-builder/src/main.rs | 3 +-- 5 files changed, 5 insertions(+), 20 deletions(-) diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 9de0cbe6e..815b949de 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -175,8 +175,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index c95f3dd95..7e7d54703 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -224,8 +224,7 @@ where .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) // no extradata for OP - .extradata(Default::default()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(Default::default()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 8da9163d0..6529710ca 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -18,7 +18,7 @@ use reth_payload_builder::{ PayloadJobGenerator, }; use reth_primitives::{ - constants::{EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, RETH_CLIENT_VERSION, SLOT_DURATION}, + constants::{EMPTY_WITHDRAWALS, RETH_CLIENT_VERSION, SLOT_DURATION}, proofs, BlockNumberOrTag, Bytes, ChainSpec, SealedBlock, Withdrawals, B256, U256, }; use reth_provider::{ @@ -251,8 +251,6 @@ impl PayloadTaskGuard { pub struct BasicPayloadJobGeneratorConfig { /// Data to include in the block's extra data field. 
extradata: Bytes, - /// Target gas ceiling for built blocks, defaults to [ETHEREUM_BLOCK_GAS_LIMIT] gas. - max_gas_limit: u64, /// The interval at which the job should build a new payload after the last. interval: Duration, /// The deadline for when the payload builder job should resolve. @@ -296,21 +294,12 @@ impl BasicPayloadJobGeneratorConfig { self.extradata = extradata; self } - - /// Sets the target gas ceiling for mined blocks. - /// - /// Defaults to [ETHEREUM_BLOCK_GAS_LIMIT] gas. - pub fn max_gas_limit(mut self, max_gas_limit: u64) -> Self { - self.max_gas_limit = max_gas_limit; - self - } } impl Default for BasicPayloadJobGeneratorConfig { fn default() -> Self { Self { extradata: alloy_rlp::encode(RETH_CLIENT_VERSION.as_bytes()).into(), - max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, interval: Duration::from_secs(1), // 12s slot time deadline: SLOT_DURATION, diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index ada28c0f3..d16146420 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -235,8 +235,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 2c468c34a..b2bc6af36 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -47,8 +47,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = EmptyBlockPayloadJobGenerator::with_builder( ctx.provider().clone(), 
From 1a1c24ba24be968ec40c01c10b1c3c3d638d7bfc Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Fri, 3 May 2024 00:40:45 +0530 Subject: [PATCH 177/250] dropped ```RUST_LOG=info``` from recommended commands (#8054) --- book/jsonrpc/intro.md | 2 +- book/run/mainnet.md | 8 ++++---- book/run/observability.md | 2 +- book/run/pruning.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/book/jsonrpc/intro.md b/book/jsonrpc/intro.md index 1c602f6d2..21ded5bcc 100644 --- a/book/jsonrpc/intro.md +++ b/book/jsonrpc/intro.md @@ -114,7 +114,7 @@ You can use `curl`, a programming language with a low-level library, or a tool l As a reminder, you need to run the command below to enable all of these APIs using an HTTP transport: ```bash -RUST_LOG=info reth node --http --http.api "admin,debug,eth,net,trace,txpool,web3,rpc" +reth node --http --http.api "admin,debug,eth,net,trace,txpool,web3,rpc" ``` This allows you to then call: diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 67e70b9db..4412f51c7 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -20,12 +20,12 @@ First, ensure that you have Reth installed by following the [installation instru Now, to start the archive node, run: ```bash -RUST_LOG=info reth node +reth node ``` And to start the full node, run: ```bash -RUST_LOG=info reth node --full +reth node --full ``` On differences between archive and full nodes, see [Pruning & Full Node](./pruning.md#basic-concepts) section. @@ -39,7 +39,7 @@ You can override this path using the `--authrpc.jwtsecret` option. You MUST use So one might do: ```bash -RUST_LOG=info reth node \ +reth node \ --authrpc.jwtsecret /path/to/secret \ --authrpc.addr 127.0.0.1 \ --authrpc.port 8551 @@ -54,7 +54,7 @@ First, make sure you have Lighthouse installed. 
Sigma Prime provides excellent [ Assuming you have done that, run: ```bash -RUST_LOG=info lighthouse bn \ +lighthouse bn \ --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ --execution-endpoint http://localhost:8551 \ --execution-jwt /path/to/secret diff --git a/book/run/observability.md b/book/run/observability.md index 39d485e1f..9f0f1b852 100644 --- a/book/run/observability.md +++ b/book/run/observability.md @@ -3,7 +3,7 @@ Reth exposes a number of metrics, which are listed [here][metrics]. We can serve them from an HTTP endpoint by adding the `--metrics` flag: ```bash -RUST_LOG=info reth node --metrics 127.0.0.1:9001 +reth node --metrics 127.0.0.1:9001 ``` Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time: diff --git a/book/run/pruning.md b/book/run/pruning.md index 4e6966551..b6f23f544 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -39,7 +39,7 @@ To run Reth as a full node, follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md), and add a `--full` flag. 
For example: ```bash -RUST_LOG=info reth node \ +reth node \ --full \ --authrpc.jwtsecret /path/to/secret \ --authrpc.addr 127.0.0.1 \ From 232e7bf19b8948718325e3eaaf1629d69efae4db Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 2 May 2024 20:31:48 +0100 Subject: [PATCH 178/250] feat(cli): make `db stats` non-detailed by default (#8056) --- bin/reth/src/commands/db/stats.rs | 40 ++--- book/SUMMARY.md | 2 + book/cli/SUMMARY.md | 2 + book/cli/reth.md | 1 + book/cli/reth/db.md | 1 + book/cli/reth/db/stats.md | 13 +- book/cli/reth/import.md | 6 + book/cli/reth/node.md | 203 ++++++++++++----------- book/cli/reth/p2p.md | 44 ++++- book/cli/reth/stage/drop.md | 4 +- book/cli/reth/stage/run.md | 38 ++++- book/cli/reth/stage/unwind.md | 115 ++++++++++++- book/cli/reth/stage/unwind/num-blocks.md | 2 +- book/cli/reth/stage/unwind/to-block.md | 2 +- 14 files changed, 337 insertions(+), 136 deletions(-) diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index 474603c74..b47e7980b 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -25,11 +25,11 @@ use tracing::info; pub struct Command { /// Show only the total size for static files. #[arg(long, default_value_t = false)] - only_total_size: bool, + detailed_sizes: bool, - /// Show only the summary per static file segment. + /// Show detailed information per static file segment. #[arg(long, default_value_t = false)] - summary: bool, + detailed_segments: bool, /// Show a checksum of each table in the database. 
/// @@ -152,7 +152,7 @@ impl Command { let mut table = ComfyTable::new(); table.load_preset(comfy_table::presets::ASCII_MARKDOWN); - if !self.only_total_size { + if self.detailed_sizes { table.set_header([ "Segment", "Block Range", @@ -216,18 +216,7 @@ impl Command { .map(|metadata| metadata.len()) .unwrap_or_default(); - if self.summary { - if segment_columns > 0 { - assert_eq!(segment_columns, columns); - } else { - segment_columns = columns; - } - segment_rows += rows; - segment_data_size += data_size; - segment_index_size += index_size; - segment_offsets_size += offsets_size; - segment_config_size += config_size; - } else { + if self.detailed_segments { let mut row = Row::new(); row.add_cell(Cell::new(segment)) .add_cell(Cell::new(format!("{block_range}"))) @@ -235,7 +224,7 @@ impl Command { tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), )) .add_cell(Cell::new(format!("{columns} x {rows}"))); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(data_size as f64))) .add_cell(Cell::new(human_bytes(index_size as f64))) .add_cell(Cell::new(human_bytes(offsets_size as f64))) @@ -245,6 +234,17 @@ impl Command { (data_size + index_size + offsets_size + config_size) as f64, ))); table.add_row(row); + } else { + if segment_columns > 0 { + assert_eq!(segment_columns, columns); + } else { + segment_columns = columns; + } + segment_rows += rows; + segment_data_size += data_size; + segment_index_size += index_size; + segment_offsets_size += offsets_size; + segment_config_size += config_size; } total_data_size += data_size; @@ -253,7 +253,7 @@ impl Command { total_config_size += config_size; } - if self.summary { + if !self.detailed_segments { let first_ranges = ranges.first().expect("not empty list of ranges"); let last_ranges = ranges.last().expect("not empty list of ranges"); @@ -271,7 +271,7 @@ impl Command { tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), )) 
.add_cell(Cell::new(format!("{segment_columns} x {segment_rows}"))); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(segment_data_size as f64))) .add_cell(Cell::new(human_bytes(segment_index_size as f64))) .add_cell(Cell::new(human_bytes(segment_offsets_size as f64))) @@ -299,7 +299,7 @@ impl Command { .add_cell(Cell::new("")) .add_cell(Cell::new("")) .add_cell(Cell::new("")); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(total_data_size as f64))) .add_cell(Cell::new(human_bytes(total_index_size as f64))) .add_cell(Cell::new(human_bytes(total_offsets_size as f64))) diff --git a/book/SUMMARY.md b/book/SUMMARY.md index ffd5f67e0..fc6deb282 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -30,11 +30,13 @@ - [`reth`](./cli/reth.md) - [`reth node`](./cli/reth/node.md) - [`reth init`](./cli/reth/init.md) + - [`reth init-state`](./cli/reth/init-state.md) - [`reth import`](./cli/reth/import.md) - [`reth dump-genesis`](./cli/reth/dump-genesis.md) - [`reth db`](./cli/reth/db.md) - [`reth db stats`](./cli/reth/db/stats.md) - [`reth db list`](./cli/reth/db/list.md) + - [`reth db checksum`](./cli/reth/db/checksum.md) - [`reth db diff`](./cli/reth/db/diff.md) - [`reth db get`](./cli/reth/db/get.md) - [`reth db get mdbx`](./cli/reth/db/get/mdbx.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md index 07711434e..ee3d714b2 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -1,11 +1,13 @@ - [`reth`](./reth.md) - [`reth node`](./reth/node.md) - [`reth init`](./reth/init.md) + - [`reth init-state`](./reth/init-state.md) - [`reth import`](./reth/import.md) - [`reth dump-genesis`](./reth/dump-genesis.md) - [`reth db`](./reth/db.md) - [`reth db stats`](./reth/db/stats.md) - [`reth db list`](./reth/db/list.md) + - [`reth db checksum`](./reth/db/checksum.md) - [`reth db diff`](./reth/db/diff.md) - [`reth db get`](./reth/db/get.md) - [`reth db get mdbx`](./reth/db/get/mdbx.md) 
diff --git a/book/cli/reth.md b/book/cli/reth.md index f213a30f2..8b6f757c9 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -9,6 +9,7 @@ Usage: reth [OPTIONS] Commands: node Start the node init Initialize the database from a genesis file + init-state Initialize the database from a state dump file import This syncs RLP encoded blocks from a file dump-genesis Dumps genesis block JSON configuration to stdout db Database debugging utilities diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 77137dadb..bd5989d7f 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -9,6 +9,7 @@ Usage: reth db [OPTIONS] Commands: stats Lists all the tables, their entry count and their size list Lists the contents of a table + checksum Calculates the content checksum of a table diff Create a diff between two database tables or two entire databases get Gets the content of a table for the given key drop Deletes all database entries diff --git a/book/cli/reth/db/stats.md b/book/cli/reth/db/stats.md index dea5e3d05..437c10bd0 100644 --- a/book/cli/reth/db/stats.md +++ b/book/cli/reth/db/stats.md @@ -18,7 +18,7 @@ Options: [default: default] - --only-total-size + --detailed-sizes Show only the total size for static files --chain @@ -30,8 +30,15 @@ Options: [default: mainnet] - --summary - Show only the summary per static file segment + --detailed-segments + Show detailed information per static file segment + + --checksum + Show a checksum of each table in the database. + + WARNING: this option will take a long time to run, as it needs to traverse and hash the entire database. + + For individual table checksums, use the `reth db checksum` command. --instance Add a new instance of a node. diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 382efb8ef..411527f9e 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -30,6 +30,12 @@ Options: [default: mainnet] + --no-state + Disables stages that require state. 
+ + --chunk-len + Chunk byte length. + --instance Add a new instance of a node. diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 450180c84..d1972a608 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -4,21 +4,18 @@ Start the node ```bash $ reth node --help - -Start the node - Usage: reth node [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --config @@ -27,26 +24,26 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] --with-unused-ports Sets all ports to unused, allowing the OS to choose random unused ports when sockets are bound. - + Mutually exclusive with `--instance`. -h, --help @@ -55,7 +52,7 @@ Options: Metrics: --metrics Enable Prometheus metrics. - + The metrics will be served at the given interface and port. 
Networking: @@ -73,27 +70,42 @@ Networking: --discovery.addr The UDP address to use for devp2p peer discovery version 4 - + [default: 0.0.0.0] --discovery.port The UDP port to use for devp2p peer discovery version 4 - + [default: 30303] --discovery.v5.addr The UDP address to use for devp2p peer discovery version 5 - + [default: 0.0.0.0] --discovery.v5.port The UDP port to use for devp2p peer discovery version 5 - + [default: 9000] + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. - + --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only @@ -101,7 +113,7 @@ Networking: --bootnodes Comma separated enode URLs for P2P discovery bootstrap. - + Will fall back to a network-specific default if not specified. --peers-file @@ -110,12 +122,12 @@ Networking: --identity Custom node identity - - [default: reth/-/-gnu] + + [default: reth/-/] --p2p-secret-key Secret key to use for this node. - + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. --no-persist-peers @@ -123,17 +135,17 @@ Networking: --nat NAT resolution method (any|none|upnp|publicip|extip:\) - + [default: any] --addr Network listening address - + [default: 0.0.0.0] --port Network listening port - + [default: 30303] --max-outbound-peers @@ -144,14 +156,14 @@ Networking: --pooled-tx-response-soft-limit Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. - + . 
- + [default: 2097152] --pooled-tx-pack-soft-limit Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB - + [default: 131072] RPC: @@ -160,17 +172,17 @@ RPC: --http.addr Http server address to listen on - + [default: 127.0.0.1] --http.port Http server port to listen on - + [default: 8545] --http.api Rpc Modules to be configured for the HTTP server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --http.corsdomain @@ -181,12 +193,12 @@ RPC: --ws.addr Ws server address to listen on - + [default: 127.0.0.1] --ws.port Ws server port to listen on - + [default: 8546] --ws.origins @@ -194,7 +206,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --ipcdisable @@ -202,176 +214,176 @@ RPC: --ipcpath Filename for IPC socket/pipe within the datadir - - [default: /tmp/reth.ipc] + + [default: .ipc] --authrpc.addr Auth server address to listen on - + [default: 127.0.0.1] --authrpc.port Auth server port to listen on - + [default: 8551] --authrpc.jwtsecret Path to a JWT secret to use for the authenticated engine-API RPC server. - + This will enforce JWT authentication for all requests coming from the consensus layer. - + If no path is provided, a secret will be generated and stored in the datadir under `//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. 
--auth-ipc - Enable auth engine api over IPC + Enable auth engine API over IPC --auth-ipc.path Filename for auth IPC socket/pipe within the datadir - - [default: /tmp/reth_engine_api.ipc] + + [default: _engine_api.ipc] --rpc.jwtsecret Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and `--ws.api`. - + This is __not__ used for the authenticated engine-API RPC server, see `--authrpc.jwtsecret`. --rpc.max-request-size Set the maximum RPC request payload size for both HTTP and WS in megabytes - + [default: 15] --rpc.max-response-size Set the maximum RPC response payload size for both HTTP and WS in megabytes - + [default: 160] [aliases: rpc.returndata.limit] --rpc.max-subscriptions-per-connection Set the maximum concurrent subscriptions per connection - + [default: 1024] --rpc.max-connections Maximum number of RPC server connections - + [default: 500] --rpc.max-tracing-requests Maximum number of concurrent tracing requests - - [default: 14] + + [default: 8] --rpc.max-blocks-per-filter Maximum number of blocks that could be scanned per filter request. (0 = entire chain) - + [default: 100000] --rpc.max-logs-per-response Maximum number of logs that can be returned in a single response. 
(0 = no limit) - + [default: 20000] --rpc.gascap Maximum gas limit for `eth_call` and call tracing RPC methods - + [default: 50000000] RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache - + [default: 5000] --rpc-cache.max-receipts Max number receipts in cache - + [default: 2000] --rpc-cache.max-envs Max number of bytes for cached env data - + [default: 1000] --rpc-cache.max-concurrent-db-requests Max number of concurrent database requests - + [default: 512] Gas Price Oracle: --gpo.blocks Number of recent blocks to check for gas price - + [default: 20] --gpo.ignoreprice Gas Price below which gpo will ignore transactions - + [default: 2] --gpo.maxprice Maximum transaction priority fee(or gasprice before London Fork) to be recommended by gpo - + [default: 500000000000] --gpo.percentile The percentile of gas prices to use for the estimate - + [default: 60] TxPool: --txpool.pending-max-count Max number of transaction in the pending sub-pool - + [default: 10000] --txpool.pending-max-size Max size of the pending sub-pool in megabytes - + [default: 20] --txpool.basefee-max-count Max number of transaction in the basefee sub-pool - + [default: 10000] --txpool.basefee-max-size Max size of the basefee sub-pool in megabytes - + [default: 20] --txpool.queued-max-count Max number of transaction in the queued sub-pool - + [default: 10000] --txpool.queued-max-size Max size of the queued sub-pool in megabytes - + [default: 20] --txpool.max-account-slots Max number of executable transaction slots guaranteed per account - + [default: 16] --txpool.pricebump Price bump (in %) for the transaction pool underpriced check - + [default: 10] --blobpool.pricebump Price bump percentage to replace an already existing blob transaction - + [default: 100] --txpool.max-tx-input-bytes Max size in bytes of a single transaction allowed to enter the pool - + [default: 131072] --txpool.max-cached-entries The maximum number of blobs to keep in the in memory blob cache - + [default: 100] 
--txpool.nolocals @@ -386,33 +398,33 @@ TxPool: Builder: --builder.extradata Block extra data set by the payload builder - + [default: reth//] --builder.gaslimit Target gas ceiling for built blocks - + [default: 30000000] --builder.interval The interval at which the job should build a new payload after the last (in seconds) - + [default: 1] --builder.deadline The deadline for when the payload builder job should resolve - + [default: 12] --builder.max-tasks Maximum number of tasks to spawn for building a payload - + [default: 3] Debug: --debug.continuous Prompt the downloader to download blocks one at a time. - + NOTE: This is for testing purposes only. --debug.terminate @@ -420,7 +432,7 @@ Debug: --debug.tip Set the chain tip manually for testing purposes. - + NOTE: This is a temporary flag --debug.max-block @@ -438,6 +450,9 @@ Debug: --debug.hook-all Hook on every transaction in a block + --debug.skip-fcu + If provided, the engine will skip `n` consecutive FCUs + --debug.engine-api-store The path to store engine API messages at. If specified, all of the intercepted engine API messages will be written to specified location @@ -457,13 +472,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] Dev testnet: --dev Start the node in dev mode - + This mode uses a local proof-of-authority consensus engine with either fixed block times or automatically mined blocks. Disables network discovery and enables local http server. @@ -475,7 +490,7 @@ Dev testnet: --dev.block-time Interval between blocks. 
- + Parses strings using [humantime::parse_duration] --dev.block-time 12s @@ -486,7 +501,7 @@ Pruning: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -496,12 +511,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -511,22 +526,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -534,12 +549,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -550,7 +565,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info @@ -559,4 +574,4 @@ Display: -q, --quiet Silence all log output -``` +``` \ No newline at end of file diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 17cd396cf..6f1c1d3e6 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -49,24 +49,36 @@ Options: --disable-discv4-discovery Disable Discv4 discovery + --enable-discv5-discovery + Enable Discv5 discovery + --discovery.addr - The UDP address to use for P2P discovery/networking + The UDP address to use for devp2p peer discovery version 4 [default: 0.0.0.0] --discovery.port - The UDP port to use for P2P discovery/networking + The UDP port to use for devp2p peer discovery version 4 [default: 30303] - --trusted-peer - Target trusted peer + --discovery.v5.addr + The UDP address to use for devp2p peer discovery version 5 + + [default: 0.0.0.0] - --trusted-only - Connect only to trusted peers + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] - --retries - The number of retries per request + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap [default: 5] @@ -81,6 +93,22 @@ Options: [default: 1] + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + + --trusted-peer + Target trusted peer + + --trusted-only + Connect only to trusted peers + + --retries + The number of retries per request + + [default: 5] + --nat [default: any] diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 2efe9ed78..2b647574c 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -68,8 +68,8 @@ Database: - execution: The execution stage within the pipeline - 
account-hashing: The account hashing stage within the pipeline - storage-hashing: The storage hashing stage within the pipeline - - hashing: The hashing stage within the pipeline - - merkle: The Merkle stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline - tx-lookup: The transaction lookup stage within the pipeline - account-history: The account history stage within the pipeline - storage-history: The storage history stage within the pipeline diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index f20eb3f68..348f082c4 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -17,8 +17,8 @@ Arguments: - execution: The execution stage within the pipeline - account-hashing: The account hashing stage within the pipeline - storage-hashing: The storage hashing stage within the pipeline - - hashing: The hashing stage within the pipeline - - merkle: The Merkle stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline - tx-lookup: The transaction lookup stage within the pipeline - account-history: The account history stage within the pipeline - storage-history: The storage history stage within the pipeline @@ -96,16 +96,44 @@ Networking: --disable-discv4-discovery Disable Discv4 discovery + --enable-discv5-discovery + Enable Discv5 discovery + --discovery.addr - The UDP address to use for P2P discovery/networking + The UDP address to use for devp2p peer discovery version 4 [default: 0.0.0.0] --discovery.port - The UDP port to use for P2P discovery/networking + The UDP port to use for devp2p peer discovery version 4 [default: 30303] + --discovery.v5.addr + The UDP address to use for devp2p peer discovery version 5 + + [default: 0.0.0.0] + + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] + + 
--discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. @@ -126,7 +154,7 @@ Networking: --identity Custom node identity - [default: reth/-/-gnu] + [default: reth/-/] --p2p-secret-key Secret key to use for this node. diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index 8479bca51..44968aede 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -7,8 +7,8 @@ $ reth stage unwind --help Usage: reth stage unwind [OPTIONS] Commands: - to-block Unwinds the database until the given block number (range is inclusive) - num-blocks Unwinds the given number of blocks from the database + to-block Unwinds the database from the latest block, until the given block number or hash has been reached, that block is not included + num-blocks Unwinds the database from the latest block, until the given number of blocks have been reached help Print this message or the help of the given subcommand(s) Options: @@ -65,6 +65,117 @@ Database: [possible values: true, false] +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP address to use for 
devp2p peer discovery version 5 + + [default: 0.0.0.0] + + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect only to trusted peers + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound requests. default: 100 + + --max-inbound-peers + Maximum number of inbound requests. default: 30 + + --pooled-tx-response-soft-limit + Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. + + . 
+ + [default: 2097152] + + --pooled-tx-pack-soft-limit + Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB + + [default: 131072] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/unwind/num-blocks.md b/book/cli/reth/stage/unwind/num-blocks.md index 9737bd4fa..24d2bc516 100644 --- a/book/cli/reth/stage/unwind/num-blocks.md +++ b/book/cli/reth/stage/unwind/num-blocks.md @@ -1,6 +1,6 @@ # reth stage unwind num-blocks -Unwinds the given number of blocks from the database +Unwinds the database from the latest block, until the given number of blocks have been reached ```bash $ reth stage unwind num-blocks --help diff --git a/book/cli/reth/stage/unwind/to-block.md b/book/cli/reth/stage/unwind/to-block.md index 74f8ec4b7..f8aa3bd6e 100644 --- a/book/cli/reth/stage/unwind/to-block.md +++ b/book/cli/reth/stage/unwind/to-block.md @@ -1,6 +1,6 @@ # reth stage unwind to-block -Unwinds the database until the given block number (range is inclusive) +Unwinds the database from the latest block, until the given block number or hash has been reached, that block is not included ```bash $ reth stage unwind to-block --help From 2ac2433a96cf27895565d4e138bb79961bf3154e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 2 May 2024 21:19:44 +0100 Subject: [PATCH 179/250] feat(examples): add blob support to the rollup ExEx (#8028) --- Cargo.lock | 150 ++++- .../transaction-pool/src/test_utils/mock.rs | 1 - deny.toml | 1 + examples/exex/rollup/Cargo.toml | 8 +- examples/exex/rollup/rollup_abi.json | 627 +----------------- examples/exex/rollup/src/execution.rs | 488 ++++++++++++++ examples/exex/rollup/src/main.rs | 400 ++--------- 7 files changed, 655 insertions(+), 1020 deletions(-) create 
mode 100644 examples/exex/rollup/src/execution.rs diff --git a/Cargo.lock b/Cargo.lock index ffb669d5b..24b07b8ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,16 +135,30 @@ name = "alloy-consensus" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-eips", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "c-kzg", "serde", "sha2 0.10.8", "thiserror", ] +[[package]] +name = "alloy-consensus" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "c-kzg", + "serde", + "sha2 0.10.8", +] + [[package]] name = "alloy-dyn-abi" version = "0.7.1" @@ -170,7 +184,7 @@ source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f67 dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "arbitrary", "c-kzg", "derive_more", @@ -182,13 +196,37 @@ dependencies = [ "serde", ] +[[package]] +name = "alloy-eips" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "c-kzg", + "once_cell", + "serde", +] + [[package]] name = "alloy-genesis" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "serde", + "serde_json", +] + +[[package]] +name = 
"alloy-genesis" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-primitives", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", "serde", "serde_json", ] @@ -222,11 +260,11 @@ name = "alloy-network" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-consensus", - "alloy-eips", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -239,7 +277,7 @@ name = "alloy-node-bindings" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-genesis", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "k256", "serde_json", @@ -281,12 +319,12 @@ name = "alloy-provider" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-eips", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -351,12 +389,12 @@ name = "alloy-rpc-types" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -368,13 +406,31 @@ dependencies = [ "thiserror", ] +[[package]] +name = "alloy-rpc-types" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-sol-types", + "itertools 0.12.1", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "serde", ] @@ -383,12 +439,12 @@ name = "alloy-rpc-types-engine" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-consensus", - "alloy-eips", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", - "alloy-serde", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -404,8 +460,8 @@ version = "0.1.0" source = 
"git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ "alloy-primitives", - "alloy-rpc-types", - "alloy-serde", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "serde", "serde_json", ] @@ -420,6 +476,16 @@ dependencies = [ "serde_json", ] +[[package]] +name = "alloy-serde" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + [[package]] name = "alloy-signer" version = "0.1.0" @@ -438,7 +504,7 @@ name = "alloy-signer-wallet" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" dependencies = [ - "alloy-consensus", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -2885,9 +2951,11 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-rlp", "alloy-sol-types", "eyre", + "foundry-blob-explorers", "futures", "once_cell", "reth", @@ -3038,6 +3106,21 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "foundry-blob-explorers" +version = "0.1.0" +source = "git+https://github.com/foundry-rs/block-explorers#cd824d3fc53feca59ca6a2fc76f191fbb3ac2011" +dependencies = [ + "alloy-chains", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "chrono", + "reqwest 0.12.4", + "serde", +] + [[package]] name = "fragile" version = "2.0.0" @@ -6258,6 +6341,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.52.0", ] @@ -6480,8 
+6564,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips", - "alloy-genesis", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "arbitrary", "bytes", @@ -6686,9 +6770,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-network", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7422,8 +7506,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips", - "alloy-genesis", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7686,10 +7770,10 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7716,7 +7800,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7821,7 +7905,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "reth-primitives", "secp256k1", ] @@ -7958,7 +8042,7 @@ version = "0.1.0" source = 
"git+https://github.com/paradigmxyz/evm-inspectors?rev=3d2077e#3d2077ee665046c256448a8bd90d8e93ea85de56" dependencies = [ "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index bcacff2da..17ad1f7c3 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -733,7 +733,6 @@ impl PoolTransaction for MockTransaction { fn chain_id(&self) -> Option { match self { MockTransaction::Legacy { chain_id, .. } => *chain_id, - MockTransaction::Eip1559 { chain_id, .. } | MockTransaction::Eip4844 { chain_id, .. } | MockTransaction::Eip2930 { chain_id, .. } => Some(*chain_id), diff --git a/deny.toml b/deny.toml index 61cced4fb..38994d197 100644 --- a/deny.toml +++ b/deny.toml @@ -90,6 +90,7 @@ unknown-git = "deny" allow-git = [ # TODO: remove, see ./Cargo.toml "https://github.com/alloy-rs/alloy", + "https://github.com/foundry-rs/block-explorers", "https://github.com/paradigmxyz/evm-inspectors", "https://github.com/sigp/discv5", ] diff --git a/examples/exex/rollup/Cargo.toml b/examples/exex/rollup/Cargo.toml index 8d338c241..f32a77629 100644 --- a/examples/exex/rollup/Cargo.toml +++ b/examples/exex/rollup/Cargo.toml @@ -21,16 +21,18 @@ reth-tracing.workspace = true reth-trie.workspace = true # async -tokio.workspace = true futures.workspace = true +tokio.workspace = true # misc -alloy-sol-types = { workspace = true, features = ["json"] } +alloy-consensus = { workspace = true, features = ["kzg"] } alloy-rlp.workspace = true +alloy-sol-types = { workspace = true, features = ["json"] } eyre.workspace = true +foundry-blob-explorers = { git = "https://github.com/foundry-rs/block-explorers" } +once_cell.workspace = true rusqlite = { version = "0.31.0", features = ["bundled"] } serde_json.workspace = 
true -once_cell.workspace = true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/examples/exex/rollup/rollup_abi.json b/examples/exex/rollup/rollup_abi.json index 08bc23f0e..d7278e9f6 100644 --- a/examples/exex/rollup/rollup_abi.json +++ b/examples/exex/rollup/rollup_abi.json @@ -1,626 +1 @@ -[ - { - "inputs": [ - { "internalType": "address", "name": "admin", "type": "address" } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { "inputs": [], "name": "AccessControlBadConfirmation", "type": "error" }, - { - "inputs": [ - { "internalType": "uint48", "name": "schedule", "type": "uint48" } - ], - "name": "AccessControlEnforcedDefaultAdminDelay", - "type": "error" - }, - { - "inputs": [], - "name": "AccessControlEnforcedDefaultAdminRules", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "defaultAdmin", - "type": "address" - } - ], - "name": "AccessControlInvalidDefaultAdmin", - "type": "error" - }, - { - "inputs": [ - { "internalType": "address", "name": "account", "type": "address" }, - { - "internalType": "bytes32", - "name": "neededRole", - "type": "bytes32" - } - ], - "name": "AccessControlUnauthorizedAccount", - "type": "error" - }, - { - "inputs": [ - { "internalType": "uint256", "name": "expected", "type": "uint256" } - ], - "name": "BadSequence", - "type": "error" - }, - { "inputs": [], "name": "BadSignature", "type": "error" }, - { "inputs": [], "name": "BlockExpired", "type": "error" }, - { - "inputs": [ - { - "internalType": "address", - "name": "sequencer", - "type": "address" - } - ], - "name": "NotSequencer", - "type": "error" - }, - { "inputs": [], "name": "OrderExpired", "type": "error" }, - { - "inputs": [ - { "internalType": "uint8", "name": "bits", "type": "uint8" }, - { "internalType": "uint256", "name": "value", "type": "uint256" } - ], - "name": "SafeCastOverflowedUintDowncast", - "type": "error" - }, - { - "anonymous": false, - "inputs": [ - 
{ - "indexed": true, - "internalType": "address", - "name": "sequencer", - "type": "address" - }, - { - "components": [ - { - "internalType": "uint256", - "name": "rollupChainId", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "sequence", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "confirmBy", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "gasLimit", - "type": "uint256" - }, - { - "internalType": "address", - "name": "rewardAddress", - "type": "address" - } - ], - "indexed": true, - "internalType": "struct CalldataZenith.BlockHeader", - "name": "header", - "type": "tuple" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "blockData", - "type": "bytes" - } - ], - "name": "BlockSubmitted", - "type": "event" - }, - { - "anonymous": false, - "inputs": [], - "name": "DefaultAdminDelayChangeCanceled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint48", - "name": "newDelay", - "type": "uint48" - }, - { - "indexed": false, - "internalType": "uint48", - "name": "effectSchedule", - "type": "uint48" - } - ], - "name": "DefaultAdminDelayChangeScheduled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [], - "name": "DefaultAdminTransferCanceled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "newAdmin", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint48", - "name": "acceptSchedule", - "type": "uint48" - } - ], - "name": "DefaultAdminTransferScheduled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "token", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "rollupRecipient", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "amount", - "type": "uint256" - } - ], - 
"name": "Enter", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "token", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "hostRecipient", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "amount", - "type": "uint256" - } - ], - "name": "ExitFilled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "role", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "bytes32", - "name": "previousAdminRole", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "bytes32", - "name": "newAdminRole", - "type": "bytes32" - } - ], - "name": "RoleAdminChanged", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "role", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "address", - "name": "account", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "sender", - "type": "address" - } - ], - "name": "RoleGranted", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "role", - "type": "bytes32" - }, - { - "indexed": true, - "internalType": "address", - "name": "account", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "sender", - "type": "address" - } - ], - "name": "RoleRevoked", - "type": "event" - }, - { "stateMutability": "payable", "type": "fallback" }, - { - "inputs": [], - "name": "DEFAULT_ADMIN_ROLE", - "outputs": [ - { "internalType": "bytes32", "name": "", "type": "bytes32" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "SEQUENCER_ROLE", - "outputs": [ - { "internalType": "bytes32", "name": "", "type": "bytes32" } - ], - "stateMutability": 
"view", - "type": "function" - }, - { - "inputs": [], - "name": "acceptDefaultAdminTransfer", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "address", "name": "newAdmin", "type": "address" } - ], - "name": "beginDefaultAdminTransfer", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "components": [ - { - "internalType": "uint256", - "name": "rollupChainId", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "sequence", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "confirmBy", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "gasLimit", - "type": "uint256" - }, - { - "internalType": "address", - "name": "rewardAddress", - "type": "address" - } - ], - "internalType": "struct CalldataZenith.BlockHeader", - "name": "header", - "type": "tuple" - }, - { "internalType": "bytes", "name": "blockData", "type": "bytes" } - ], - "name": "blockCommitment", - "outputs": [ - { "internalType": "bytes32", "name": "commit", "type": "bytes32" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "cancelDefaultAdminTransfer", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "uint48", "name": "newDelay", "type": "uint48" } - ], - "name": "changeDefaultAdminDelay", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "defaultAdmin", - "outputs": [ - { "internalType": "address", "name": "", "type": "address" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "defaultAdminDelay", - "outputs": [{ "internalType": "uint48", "name": "", "type": "uint48" }], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "defaultAdminDelayIncreaseWait", - "outputs": [{ "internalType": "uint48", 
"name": "", "type": "uint48" }], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "rollupRecipient", - "type": "address" - } - ], - "name": "enter", - "outputs": [], - "stateMutability": "payable", - "type": "function" - }, - { - "inputs": [ - { - "components": [ - { - "internalType": "address", - "name": "token", - "type": "address" - }, - { - "internalType": "address", - "name": "recipient", - "type": "address" - }, - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "deadline", - "type": "uint256" - } - ], - "internalType": "struct HostPassage.ExitOrder[]", - "name": "orders", - "type": "tuple[]" - } - ], - "name": "fulfillExits", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" } - ], - "name": "getRoleAdmin", - "outputs": [ - { "internalType": "bytes32", "name": "", "type": "bytes32" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" }, - { "internalType": "address", "name": "account", "type": "address" } - ], - "name": "grantRole", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" }, - { "internalType": "address", "name": "account", "type": "address" } - ], - "name": "hasRole", - "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { "internalType": "uint256", "name": "", "type": "uint256" } - ], - "name": "nextSequence", - "outputs": [ - { "internalType": "uint256", "name": "", "type": "uint256" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "owner", - "outputs": [ - { 
"internalType": "address", "name": "", "type": "address" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "pendingDefaultAdmin", - "outputs": [ - { - "internalType": "address", - "name": "newAdmin", - "type": "address" - }, - { "internalType": "uint48", "name": "schedule", "type": "uint48" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "pendingDefaultAdminDelay", - "outputs": [ - { "internalType": "uint48", "name": "newDelay", "type": "uint48" }, - { "internalType": "uint48", "name": "schedule", "type": "uint48" } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" }, - { "internalType": "address", "name": "account", "type": "address" } - ], - "name": "renounceRole", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { "internalType": "bytes32", "name": "role", "type": "bytes32" }, - { "internalType": "address", "name": "account", "type": "address" } - ], - "name": "revokeRole", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "rollbackDefaultAdminDelay", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "components": [ - { - "internalType": "uint256", - "name": "rollupChainId", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "sequence", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "confirmBy", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "gasLimit", - "type": "uint256" - }, - { - "internalType": "address", - "name": "rewardAddress", - "type": "address" - } - ], - "internalType": "struct CalldataZenith.BlockHeader", - "name": "header", - "type": "tuple" - }, - { "internalType": "bytes", "name": "blockData", "type": "bytes" }, - { "internalType": "uint8", "name": "v", 
"type": "uint8" }, - { "internalType": "bytes32", "name": "r", "type": "bytes32" }, - { "internalType": "bytes32", "name": "s", "type": "bytes32" } - ], - "name": "submitBlock", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes4", - "name": "interfaceId", - "type": "bytes4" - } - ], - "name": "supportsInterface", - "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], - "stateMutability": "view", - "type": "function" - }, - { "stateMutability": "payable", "type": "receive" } -] +{"abi":[{"type":"constructor","inputs":[{"name":"defaultRollupChainId","type":"uint256","internalType":"uint256"},{"name":"admin","type":"address","internalType":"address"}],"stateMutability":"nonpayable"},{"type":"fallback","stateMutability":"payable"},{"type":"receive","stateMutability":"payable"},{"type":"function","name":"DEFAULT_ADMIN_ROLE","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"SEQUENCER_ROLE","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"acceptDefaultAdminTransfer","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"beginDefaultAdminTransfer","inputs":[{"name":"newAdmin","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"blockCommitment","inputs":[{"name":"header","type":"tuple","internalType":"struct 
Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"commit","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"cancelDefaultAdminTransfer","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"changeDefaultAdminDelay","inputs":[{"name":"newDelay","type":"uint48","internalType":"uint48"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"defaultAdmin","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"defaultAdminDelay","inputs":[],"outputs":[{"name":"","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"defaultAdminDelayIncreaseWait","inputs":[],"outputs":[{"name":"","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"enter","inputs":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"rollupRecipient","type":"address","internalType":"address"},{"name":"token","type":"address","internalType":"address"},{"name":"amount","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"enter","inputs":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"rollupRecipient","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"fulfillExits","inputs":[{"name":"orders","type":"tuple[]","internalType":"struct 
Passage.ExitOrder[]","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"token","type":"address","internalType":"address"},{"name":"recipient","type":"address","internalType":"address"},{"name":"amount","type":"uint256","internalType":"uint256"}]}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"getRoleAdmin","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"grantRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"hasRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"lastSubmittedAtBlock","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"nextSequence","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"owner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"pendingDefaultAdmin","inputs":[],"outputs":[{"name":"newAdmin","type":"address","internalType":"address"},{"name":"schedule","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"pendingDefaultAdminDelay","inputs":[],"outputs":[{"name":"newDelay","type":"uint48","internalType":"uint48"},{"name":"schedule","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function",
"name":"renounceRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"revokeRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"rollbackDefaultAdminDelay","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"submitBlock","inputs":[{"name":"header","type":"tuple","internalType":"struct Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","internalType":"bytes32"},{"name":"v","type":"uint8","internalType":"uint8"},{"name":"r","type":"bytes32","internalType":"bytes32"},{"name":"s","type":"bytes32","internalType":"bytes32"},{"name":"blockData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"supportsInterface","inputs":[{"name":"interfaceId","type":"bytes4","internalType":"bytes4"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"event","name":"BlockData","inputs":[{"name":"blockData","type":"bytes","indexed":false,"internalType":"bytes"}],"anonymous":false},{"type":"event","name":"BlockSubmitted","inputs":[{"name":"sequencer","type":"address","indexed":true,"internalType":"address"},{"name":"header","type":"tuple","indexed":true,"internalType":"struct 
Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"DefaultAdminDelayChangeCanceled","inputs":[],"anonymous":false},{"type":"event","name":"DefaultAdminDelayChangeScheduled","inputs":[{"name":"newDelay","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"effectSchedule","type":"uint48","indexed":false,"internalType":"uint48"}],"anonymous":false},{"type":"event","name":"DefaultAdminTransferCanceled","inputs":[],"anonymous":false},{"type":"event","name":"DefaultAdminTransferScheduled","inputs":[{"name":"newAdmin","type":"address","indexed":true,"internalType":"address"},{"name":"acceptSchedule","type":"uint48","indexed":false,"internalType":"uint48"}],"anonymous":false},{"type":"event","name":"Enter","inputs":[{"name":"rollupChainId","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"token","type":"address","indexed":true,"internalType":"address"},{"name":"rollupRecipient","type":"address","indexed":true,"internalType":"address"},{"name":"amount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"ExitFilled","inputs":[{"name":"rollupChainId","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"token","type":"address","indexed":true,"internalType":"address"},{"name":"hostRecipient","type":"address","indexed":true,"internalType":"address"},{"name":"amount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"RoleAdminChanged","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"byt
es32"},{"name":"previousAdminRole","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"newAdminRole","type":"bytes32","indexed":true,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"RoleGranted","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"account","type":"address","indexed":true,"internalType":"address"},{"name":"sender","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"RoleRevoked","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"account","type":"address","indexed":true,"internalType":"address"},{"name":"sender","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"error","name":"AccessControlBadConfirmation","inputs":[]},{"type":"error","name":"AccessControlEnforcedDefaultAdminDelay","inputs":[{"name":"schedule","type":"uint48","internalType":"uint48"}]},{"type":"error","name":"AccessControlEnforcedDefaultAdminRules","inputs":[]},{"type":"error","name":"AccessControlInvalidDefaultAdmin","inputs":[{"name":"defaultAdmin","type":"address","internalType":"address"}]},{"type":"error","name":"AccessControlUnauthorizedAccount","inputs":[{"name":"account","type":"address","internalType":"address"},{"name":"neededRole","type":"bytes32","internalType":"bytes32"}]},{"type":"error","name":"BadSequence","inputs":[{"name":"expected","type":"uint256","internalType":"uint256"}]},{"type":"error","name":"BadSignature","inputs":[{"name":"derivedSequencer","type":"address","internalType":"address"}]},{"type":"error","name":"BlockExpired","inputs":[]},{"type":"error","name":"OneRollupBlockPerHostBlock","inputs":[]},{"type":"error","name":"OrderExpired","inputs":[]},{"type":"error","name":"SafeCastOverflowedUintDowncast","inputs":[{"name":"bits","type":"uint8","internalType":"uint8"},{"name":"value","type":"uint256","internalType":"uint256"}]}],"bytecode":{"object":"0x60a060405
234801561000f575f80fd5b50604051611a98380380611a9883398101604081905261002e916101ae565b608082905262015180816001600160a01b03811661006557604051636116401160e11b81525f600482015260240160405180910390fd5b600180546001600160d01b0316600160d01b65ffffffffffff85160217905561008e5f82610098565b50505050506101e8565b5f826100f4575f6100b16002546001600160a01b031690565b6001600160a01b0316146100d857604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b6100fe8383610107565b90505b92915050565b5f828152602081815260408083206001600160a01b038516845290915281205460ff166101a7575f838152602081815260408083206001600160a01b03861684529091529020805460ff1916600117905561015f3390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a4506001610101565b505f610101565b5f80604083850312156101bf575f80fd5b825160208401519092506001600160a01b03811681146101dd575f80fd5b809150509250929050565b6080516118916102075f395f818161018e01526101ba01526118915ff3fe608060405260043610610184575f3560e01c80637e82bb01116100d0578063c7bc4a6211610089578063cf6eefb711610063578063cf6eefb7146104b7578063d547741f146104f1578063d602b9fd14610510578063ea3b9ba114610524576101b5565b8063c7bc4a6214610470578063cc8463c81461048f578063cefc1429146104a3576101b5565b80637e82bb011461039b57806384ef8ffc146103c65780638da5cb5b146103f757806391d148541461040b578063a1eda53c1461042a578063a217fddf1461045d576101b5565b806336568abe1161013d5780634842855c116101175780634842855c1461031a578063634e93da1461033e578063649a5ec71461035d5780637e5692741461037c576101b5565b806336568abe146102d557806336702119146102f45780633805c6bd14610307576101b5565b806301ffc9a7146101df578063022d63fb146102135780630aa6220b1461023b5780631e6637201461024f578063248a9ca3146102885780632f2ff15d146102b6576101b5565b366101b5576101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b005b6101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b3480156101e
a575f80fd5b506101fe6101f93660046114a8565b61057b565b60405190151581526020015b60405180910390f35b34801561021e575f80fd5b50620697805b60405165ffffffffffff909116815260200161020a565b348015610246575f80fd5b506101b36105a5565b34801561025a575f80fd5b5061027a6102693660046114cf565b60036020525f908152604090205481565b60405190815260200161020a565b348015610293575f80fd5b5061027a6102a23660046114cf565b5f9081526020819052604090206001015490565b3480156102c1575f80fd5b506101b36102d0366004611501565b6105ba565b3480156102e0575f80fd5b506101b36102ef366004611501565b6105e6565b6101b361030236600461152b565b610691565b6101b361031536600461159a565b610960565b348015610325575f80fd5b5061027a6d53455155454e4345525f524f4c4560901b81565b348015610349575f80fd5b506101b36103583660046115db565b610a31565b348015610368575f80fd5b506101b36103773660046115f4565b610a44565b348015610387575f80fd5b506101b361039636600461169a565b610a57565b3480156103a6575f80fd5b5061027a6103b53660046114cf565b60046020525f908152604090205481565b3480156103d1575f80fd5b506002546001600160a01b03165b6040516001600160a01b03909116815260200161020a565b348015610402575f80fd5b506103df610aa6565b348015610416575f80fd5b506101fe610425366004611501565b610abe565b348015610435575f80fd5b5061043e610ae6565b6040805165ffffffffffff93841681529290911660208301520161020a565b348015610468575f80fd5b5061027a5f81565b34801561047b575f80fd5b5061027a61048a36600461174d565b610b38565b34801561049a575f80fd5b50610224610bcd565b3480156104ae575f80fd5b506101b3610c2b565b3480156104c2575f80fd5b506104cb610c6a565b604080516001600160a01b03909316835265ffffffffffff90911660208301520161020a565b3480156104fc575f80fd5b506101b361050b366004611501565b610c8b565b34801561051b575f80fd5b506101b3610cb3565b6101b3610532366004611501565b604080518381523460208201526001600160a01b038316915f917fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f910160405180910390a35050565b5f6001600160e01b031982166318a4c3c360e11b148061059f575061059f82610cc5565b92915050565b5f6105af81610cf9565b6105b7610d03565b50565b816105d857604051631fe1e13d60e11b81526
0040160405180910390fd5b6105e28282610d0f565b5050565b8115801561060157506002546001600160a01b038281169116145b15610687575f80610610610c6a565b90925090506001600160a01b038216151580610632575065ffffffffffff8116155b8061064557504265ffffffffffff821610155b15610672576040516319ca5ebb60e01b815265ffffffffffff821660048201526024015b60405180910390fd5b50506001805465ffffffffffff60a01b191690555b6105e28282610d33565b345f5b8281101561095a575f8484838181106106af576106af611776565b90506080020160200160208101906106c791906115db565b6001600160a01b03160361077e578383828181106106e7576106e7611776565b90506080020160400160208101906106ff91906115db565b6001600160a01b03166108fc85858481811061071d5761071d611776565b9050608002016060013590811502906040515f60405180830381858888f1935050505015801561074f573d5f803e3d5ffd5b5083838281811061076257610762611776565b9050608002016060013582610777919061179e565b9150610875565b83838281811061079057610790611776565b90506080020160200160208101906107a891906115db565b6001600160a01b03166323b872dd338686858181106107c9576107c9611776565b90506080020160400160208101906107e191906115db565b8787868181106107f3576107f3611776565b6040516001600160e01b031960e088901b1681526001600160a01b039586166004820152949093166024850152506060608090920201013560448201526064016020604051808303815f875af115801561084f573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061087391906117b1565b505b83838281811061088757610887611776565b905060800201604001602081019061089f91906115db565b6001600160a01b03168484838181106108ba576108ba611776565b90506080020160200160208101906108d291906115db565b6001600160a01b03167fe93d7a771f81dc20f1d474f6868677269fdfa09830508e48edb0aa4d6569983386868581811061090e5761090e611776565b9050608002015f013587878681811061092957610929611776565b9050608002016060013560405161094a929190918252602082015260400190565b60405180910390a3600101610694565b50505050565b6040516323b872dd60e01b8152336004820152306024820152604481018290526001600160a01b038316906323b872dd906064016020604051808303815f875af11580156109b0573d5f803e3d5ffd5b505050506
040513d601f19601f820116820180604052508101906109d491906117b1565b50826001600160a01b0316826001600160a01b03167fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f8684604051610a23929190918252602082015260400190565b60405180910390a350505050565b5f610a3b81610cf9565b6105e282610d6b565b5f610a4e81610cf9565b6105e282610ddd565b610a648787878787610e4c565b7fc030727dea5440ebb1789967645e2595e4e67cf55821175a3f9f8b33aff41fa58282604051610a959291906117d0565b60405180910390a150505050505050565b5f610ab96002546001600160a01b031690565b905090565b5f918252602082815260408084206001600160a01b0393909316845291905290205460ff1690565b6002545f90600160d01b900465ffffffffffff168015158015610b1157504265ffffffffffff821610155b610b1c575f80610b30565b600254600160a01b900465ffffffffffff16815b915091509091565b81516020808401516060808601516040808801516080909801518151710696e6974342e73657175656e6365722e76360741b8188015246603282015260528101979097526072870194909452609286019190915260b28501969096521b6bffffffffffffffffffffffff191660d283015260e68083019390935283518083039093018352610106909101909252805191012090565b6002545f90600160d01b900465ffffffffffff168015158015610bf757504265ffffffffffff8216105b610c1257600154600160d01b900465ffffffffffff16610c25565b600254600160a01b900465ffffffffffff165b91505090565b5f610c34610c6a565b509050336001600160a01b03821614610c6257604051636116401160e11b8152336004820152602401610669565b6105b7611047565b6001546001600160a01b03811691600160a01b90910465ffffffffffff1690565b81610ca957604051631fe1e13d60e11b815260040160405180910390fd5b6105e282826110dd565b5f610cbd81610cf9565b6105b7611101565b5f6001600160e01b03198216637965db0b60e01b148061059f57506301ffc9a760e01b6001600160e01b031983161461059f565b6105b7813361110b565b610d0d5f80611144565b565b5f82815260208190526040902060010154610d2981610cf9565b61095a8383611203565b6001600160a01b0381163314610d5c5760405163334bd91960e11b815260040160405180910390fd5b610d668282611270565b505050565b5f610d74610bcd565b610d7d426112ac565b610d8791906117fe565b9050610d9382826112e2565b60405165fffffffffff
f821681526001600160a01b038316907f3377dc44241e779dd06afab5b788a35ca5f3b778836e2990bdb26a2a4b2e5ed69060200160405180910390a25050565b5f610de78261135f565b610df0426112ac565b610dfa91906117fe565b9050610e068282611144565b6040805165ffffffffffff8085168252831660208201527ff1038c18cf84a56e432fdbfaf746924b7ea511dfe03a6506a0ceba4888788d9b910160405180910390a15050565b84515f90815260036020526040812080549082610e6883611824565b91905055905085602001518114610e9557604051635f64988d60e11b815260048101829052602401610669565b8560400151421115610eba576040516378fd448d60e01b815260040160405180910390fd5b5f610ec58787610b38565b604080515f8082526020820180845284905260ff89169282019290925260608101879052608081018690529192509060019060a0016020604051602081039080840390855afa158015610f1a573d5f803e3d5ffd5b505050602060405103519050610f416d53455155454e4345525f524f4c4560901b82610abe565b610f6957604051639a7d38d960e01b81526001600160a01b0382166004820152602401610669565b87515f90815260046020526040902054439003610f9957604051632ce0494b60e01b815260040160405180910390fd5b87515f908152600460205260409081902043905551610ff2908990815181526020808301519082015260408083015190820152606080830151908201526080918201516001600160a01b03169181019190915260a00190565b6040518091039020816001600160a01b03167f9c5702b5639f451bda4f9dba7fdf9d125a675ccddd315b81ce962d3ddd986a238960405161103591815260200190565b60405180910390a35050505050505050565b5f80611051610c6a565b915091506110668165ffffffffffff16151590565b158061107a57504265ffffffffffff821610155b156110a2576040516319ca5ebb60e01b815265ffffffffffff82166004820152602401610669565b6110bd5f6110b86002546001600160a01b031690565b611270565b506110c85f83611203565b5050600180546001600160d01b031916905550565b5f828152602081905260409020600101546110f781610cf9565b61095a8383611270565b610d0d5f806112e2565b6111158282610abe565b6105e25760405163e2517d3f60e01b81526001600160a01b038216600482015260248101839052604401610669565b600254600160d01b900465ffffffffffff1680156111c6574265ffffffffffff8216101561119d57600254600180546001600160d01b0316600160a01b9092046
5ffffffffffff16600160d01b029190911790556111c6565b6040517f2b1fa2edafe6f7b9e97c1a9e0c3660e645beb2dcaa2d45bdbf9beaf5472e1ec5905f90a15b50600280546001600160a01b0316600160a01b65ffffffffffff948516026001600160d01b031617600160d01b9290931691909102919091179055565b5f8261125f575f61121c6002546001600160a01b031690565b6001600160a01b03161461124357604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b61126983836113b0565b9392505050565b5f8215801561128c57506002546001600160a01b038381169116145b156112a257600280546001600160a01b03191690555b611269838361143f565b5f65ffffffffffff8211156112de576040516306dfcc6560e41b81526030600482015260248101839052604401610669565b5090565b5f6112eb610c6a565b6001805465ffffffffffff8616600160a01b026001600160d01b03199091166001600160a01b03881617179055915061132d90508165ffffffffffff16151590565b15610d66576040517f8886ebfc4259abdbc16601dd8fb5678e54878f47b3c34836cfc51154a9605109905f90a1505050565b5f80611369610bcd565b90508065ffffffffffff168365ffffffffffff16116113915761138c838261183c565b611269565b61126965ffffffffffff8416620697805f828218828410028218611269565b5f6113bb8383610abe565b611438575f838152602081815260408083206001600160a01b03861684529091529020805460ff191660011790556113f03390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a450600161059f565b505f61059f565b5f61144a8383610abe565b15611438575f838152602081815260408083206001600160a01b0386168085529252808320805460ff1916905551339286917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a450600161059f565b5f602082840312156114b8575f80fd5b81356001600160e01b031981168114611269575f80fd5b5f602082840312156114df575f80fd5b5035919050565b80356001600160a01b03811681146114fc575f80fd5b919050565b5f8060408385031215611512575f80fd5b82359150611522602084016114e6565b90509250929050565b5f806020838503121561153c575f80fd5b823567ffffffffffffffff80821115611553575f80fd5b818501915085601f830112611566575f80fd5b8135818111156
11574575f80fd5b8660208260071b8501011115611588575f80fd5b60209290920196919550909350505050565b5f805f80608085870312156115ad575f80fd5b843593506115bd602086016114e6565b92506115cb604086016114e6565b9396929550929360600135925050565b5f602082840312156115eb575f80fd5b611269826114e6565b5f60208284031215611604575f80fd5b813565ffffffffffff81168114611269575f80fd5b5f60a08284031215611629575f80fd5b60405160a0810181811067ffffffffffffffff8211171561165857634e487b7160e01b5f52604160045260245ffd5b80604052508091508235815260208301356020820152604083013560408201526060830135606082015261168e608084016114e6565b60808201525092915050565b5f805f805f805f610140888a0312156116b1575f80fd5b6116bb8989611619565b965060a0880135955060c088013560ff811681146116d7575f80fd5b945060e08801359350610100880135925061012088013567ffffffffffffffff80821115611703575f80fd5b818a0191508a601f830112611716575f80fd5b813581811115611724575f80fd5b8b6020828501011115611735575f80fd5b60208301945080935050505092959891949750929550565b5f8060c0838503121561175e575f80fd5b6117688484611619565b9460a0939093013593505050565b634e487b7160e01b5f52603260045260245ffd5b634e487b7160e01b5f52601160045260245ffd5b8181038181111561059f5761059f61178a565b5f602082840312156117c1575f80fd5b81518015158114611269575f80fd5b60208152816020820152818360408301375f818301604090810191909152601f909201601f19160101919050565b65ffffffffffff81811683821601908082111561181d5761181d61178a565b5092915050565b5f600182016118355761183561178a565b5060010190565b65ffffffffffff82811682821603908082111561181d5761181d61178a56fea2646970667358221220111de8e40c8e2761ed9ab04f385dfef1dffcd646c5a270f4fc3dc0858a0d605764736f6c63430008190033","sourceMap":"281:7248:35:-:0;;;3619:155;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;2256:44:34;;;;3753:6:35;3761:5;-1:-1:-1;;;;;2384:33:23;;2380:115;;2440:44;;-1:-1:-1;;;2440:44:23;;2481:1;2440:44;;;516:51:38;489:18;;2440:44:23;;;;;;;2380:115;2504:13;:28;;-1:-1:-1;;;;;2504:28:23;-1:-1:-1;;;2504:28:23;;;;;;;2542:51;-1:-1:-1;2573:19:23;2542:10;:51::i;:::-;;2308:292;;3619:155:35;;281:7248;;5509:370
:23;5595:4;5615;5611:214;;5687:1;5661:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;5661:14;-1:-1:-1;;;;;5661:28:23;;5657:114;;5716:40;;-1:-1:-1;;;5716:40:23;;;;;;;;;;;5657:114;5784:20;:30;;-1:-1:-1;;;;;;5784:30:23;-1:-1:-1;;;;;5784:30:23;;;;;5611:214;5841:31;5858:4;5864:7;5841:16;:31::i;:::-;5834:38;;5509:370;;;;;:::o;6179:316:21:-;6256:4;2954:12;;;;;;;;;;;-1:-1:-1;;;;;2954:29:21;;;;;;;;;;;;6272:217;;6315:6;:12;;;;;;;;;;;-1:-1:-1;;;;;6315:29:21;;;;;;;;;:36;;-1:-1:-1;;6315:36:21;6347:4;6315:36;;;6397:12;735:10:27;;656:96;6397:12:21;-1:-1:-1;;;;;6370:40:21;6388:7;-1:-1:-1;;;;;6370:40:21;6382:4;6370:40;;;;;;;;;;-1:-1:-1;6431:4:21;6424:11;;6272:217;-1:-1:-1;6473:5:21;6466:12;;14:351:38;93:6;101;154:2;142:9;133:7;129:23;125:32;122:52;;;170:1;167;160:12;122:52;193:16;;252:2;237:18;;231:25;193:16;;-1:-1:-1;;;;;;285:31:38;;275:42;;265:70;;331:1;328;321:12;265:70;354:5;344:15;;;14:351;;;;;:::o;370:203::-;281:7248:35;;;;;;;;;;;;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x608060405260043610610184575f3560e01c80637e82bb01116100d0578063c7bc4a6211610089578063cf6eefb711610063578063cf6eefb7146104b7578063d547741f146104f1578063d602b9fd14610510578063ea3b9ba114610524576101b5565b8063c7bc4a6214610470578063cc8463c81461048f578063cefc1429146104a3576101b5565b80637e82bb011461039b57806384ef8ffc146103c65780638da5cb5b146103f757806391d148541461040b578063a1eda53c1461042a578063a217fddf1461045d576101b5565b806336568abe1161013d5780634842855c116101175780634842855c1461031a578063634e93da1461033e578063649a5ec71461035d5780637e5692741461037c576101b5565b806336568abe146102d557806336702119146102f45780633805c6bd14610307576101b5565b806301ffc9a7146101df578063022d63fb146102135780630aa6220b1461023b5780631e6637201461024f578063248a9ca3146102885780632f2ff15d146102b6576101b5565b366101b5576101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b005b6101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b3480156101ea575f80fd5b506101fe6101f93660046114a
8565b61057b565b60405190151581526020015b60405180910390f35b34801561021e575f80fd5b50620697805b60405165ffffffffffff909116815260200161020a565b348015610246575f80fd5b506101b36105a5565b34801561025a575f80fd5b5061027a6102693660046114cf565b60036020525f908152604090205481565b60405190815260200161020a565b348015610293575f80fd5b5061027a6102a23660046114cf565b5f9081526020819052604090206001015490565b3480156102c1575f80fd5b506101b36102d0366004611501565b6105ba565b3480156102e0575f80fd5b506101b36102ef366004611501565b6105e6565b6101b361030236600461152b565b610691565b6101b361031536600461159a565b610960565b348015610325575f80fd5b5061027a6d53455155454e4345525f524f4c4560901b81565b348015610349575f80fd5b506101b36103583660046115db565b610a31565b348015610368575f80fd5b506101b36103773660046115f4565b610a44565b348015610387575f80fd5b506101b361039636600461169a565b610a57565b3480156103a6575f80fd5b5061027a6103b53660046114cf565b60046020525f908152604090205481565b3480156103d1575f80fd5b506002546001600160a01b03165b6040516001600160a01b03909116815260200161020a565b348015610402575f80fd5b506103df610aa6565b348015610416575f80fd5b506101fe610425366004611501565b610abe565b348015610435575f80fd5b5061043e610ae6565b6040805165ffffffffffff93841681529290911660208301520161020a565b348015610468575f80fd5b5061027a5f81565b34801561047b575f80fd5b5061027a61048a36600461174d565b610b38565b34801561049a575f80fd5b50610224610bcd565b3480156104ae575f80fd5b506101b3610c2b565b3480156104c2575f80fd5b506104cb610c6a565b604080516001600160a01b03909316835265ffffffffffff90911660208301520161020a565b3480156104fc575f80fd5b506101b361050b366004611501565b610c8b565b34801561051b575f80fd5b506101b3610cb3565b6101b3610532366004611501565b604080518381523460208201526001600160a01b038316915f917fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f910160405180910390a35050565b5f6001600160e01b031982166318a4c3c360e11b148061059f575061059f82610cc5565b92915050565b5f6105af81610cf9565b6105b7610d03565b50565b816105d857604051631fe1e13d60e11b815260040160405180910390fd5b6105e28282610
d0f565b5050565b8115801561060157506002546001600160a01b038281169116145b15610687575f80610610610c6a565b90925090506001600160a01b038216151580610632575065ffffffffffff8116155b8061064557504265ffffffffffff821610155b15610672576040516319ca5ebb60e01b815265ffffffffffff821660048201526024015b60405180910390fd5b50506001805465ffffffffffff60a01b191690555b6105e28282610d33565b345f5b8281101561095a575f8484838181106106af576106af611776565b90506080020160200160208101906106c791906115db565b6001600160a01b03160361077e578383828181106106e7576106e7611776565b90506080020160400160208101906106ff91906115db565b6001600160a01b03166108fc85858481811061071d5761071d611776565b9050608002016060013590811502906040515f60405180830381858888f1935050505015801561074f573d5f803e3d5ffd5b5083838281811061076257610762611776565b9050608002016060013582610777919061179e565b9150610875565b83838281811061079057610790611776565b90506080020160200160208101906107a891906115db565b6001600160a01b03166323b872dd338686858181106107c9576107c9611776565b90506080020160400160208101906107e191906115db565b8787868181106107f3576107f3611776565b6040516001600160e01b031960e088901b1681526001600160a01b039586166004820152949093166024850152506060608090920201013560448201526064016020604051808303815f875af115801561084f573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061087391906117b1565b505b83838281811061088757610887611776565b905060800201604001602081019061089f91906115db565b6001600160a01b03168484838181106108ba576108ba611776565b90506080020160200160208101906108d291906115db565b6001600160a01b03167fe93d7a771f81dc20f1d474f6868677269fdfa09830508e48edb0aa4d6569983386868581811061090e5761090e611776565b9050608002015f013587878681811061092957610929611776565b9050608002016060013560405161094a929190918252602082015260400190565b60405180910390a3600101610694565b50505050565b6040516323b872dd60e01b8152336004820152306024820152604481018290526001600160a01b038316906323b872dd906064016020604051808303815f875af11580156109b0573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525
08101906109d491906117b1565b50826001600160a01b0316826001600160a01b03167fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f8684604051610a23929190918252602082015260400190565b60405180910390a350505050565b5f610a3b81610cf9565b6105e282610d6b565b5f610a4e81610cf9565b6105e282610ddd565b610a648787878787610e4c565b7fc030727dea5440ebb1789967645e2595e4e67cf55821175a3f9f8b33aff41fa58282604051610a959291906117d0565b60405180910390a150505050505050565b5f610ab96002546001600160a01b031690565b905090565b5f918252602082815260408084206001600160a01b0393909316845291905290205460ff1690565b6002545f90600160d01b900465ffffffffffff168015158015610b1157504265ffffffffffff821610155b610b1c575f80610b30565b600254600160a01b900465ffffffffffff16815b915091509091565b81516020808401516060808601516040808801516080909801518151710696e6974342e73657175656e6365722e76360741b8188015246603282015260528101979097526072870194909452609286019190915260b28501969096521b6bffffffffffffffffffffffff191660d283015260e68083019390935283518083039093018352610106909101909252805191012090565b6002545f90600160d01b900465ffffffffffff168015158015610bf757504265ffffffffffff8216105b610c1257600154600160d01b900465ffffffffffff16610c25565b600254600160a01b900465ffffffffffff165b91505090565b5f610c34610c6a565b509050336001600160a01b03821614610c6257604051636116401160e11b8152336004820152602401610669565b6105b7611047565b6001546001600160a01b03811691600160a01b90910465ffffffffffff1690565b81610ca957604051631fe1e13d60e11b815260040160405180910390fd5b6105e282826110dd565b5f610cbd81610cf9565b6105b7611101565b5f6001600160e01b03198216637965db0b60e01b148061059f57506301ffc9a760e01b6001600160e01b031983161461059f565b6105b7813361110b565b610d0d5f80611144565b565b5f82815260208190526040902060010154610d2981610cf9565b61095a8383611203565b6001600160a01b0381163314610d5c5760405163334bd91960e11b815260040160405180910390fd5b610d668282611270565b505050565b5f610d74610bcd565b610d7d426112ac565b610d8791906117fe565b9050610d9382826112e2565b60405165ffffffffffff821681526001600160a01b038316907f337
7dc44241e779dd06afab5b788a35ca5f3b778836e2990bdb26a2a4b2e5ed69060200160405180910390a25050565b5f610de78261135f565b610df0426112ac565b610dfa91906117fe565b9050610e068282611144565b6040805165ffffffffffff8085168252831660208201527ff1038c18cf84a56e432fdbfaf746924b7ea511dfe03a6506a0ceba4888788d9b910160405180910390a15050565b84515f90815260036020526040812080549082610e6883611824565b91905055905085602001518114610e9557604051635f64988d60e11b815260048101829052602401610669565b8560400151421115610eba576040516378fd448d60e01b815260040160405180910390fd5b5f610ec58787610b38565b604080515f8082526020820180845284905260ff89169282019290925260608101879052608081018690529192509060019060a0016020604051602081039080840390855afa158015610f1a573d5f803e3d5ffd5b505050602060405103519050610f416d53455155454e4345525f524f4c4560901b82610abe565b610f6957604051639a7d38d960e01b81526001600160a01b0382166004820152602401610669565b87515f90815260046020526040902054439003610f9957604051632ce0494b60e01b815260040160405180910390fd5b87515f908152600460205260409081902043905551610ff2908990815181526020808301519082015260408083015190820152606080830151908201526080918201516001600160a01b03169181019190915260a00190565b6040518091039020816001600160a01b03167f9c5702b5639f451bda4f9dba7fdf9d125a675ccddd315b81ce962d3ddd986a238960405161103591815260200190565b60405180910390a35050505050505050565b5f80611051610c6a565b915091506110668165ffffffffffff16151590565b158061107a57504265ffffffffffff821610155b156110a2576040516319ca5ebb60e01b815265ffffffffffff82166004820152602401610669565b6110bd5f6110b86002546001600160a01b031690565b611270565b506110c85f83611203565b5050600180546001600160d01b031916905550565b5f828152602081905260409020600101546110f781610cf9565b61095a8383611270565b610d0d5f806112e2565b6111158282610abe565b6105e25760405163e2517d3f60e01b81526001600160a01b038216600482015260248101839052604401610669565b600254600160d01b900465ffffffffffff1680156111c6574265ffffffffffff8216101561119d57600254600180546001600160d01b0316600160a01b90920465ffffffffffff16600160d01b02919091179
0556111c6565b6040517f2b1fa2edafe6f7b9e97c1a9e0c3660e645beb2dcaa2d45bdbf9beaf5472e1ec5905f90a15b50600280546001600160a01b0316600160a01b65ffffffffffff948516026001600160d01b031617600160d01b9290931691909102919091179055565b5f8261125f575f61121c6002546001600160a01b031690565b6001600160a01b03161461124357604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b61126983836113b0565b9392505050565b5f8215801561128c57506002546001600160a01b038381169116145b156112a257600280546001600160a01b03191690555b611269838361143f565b5f65ffffffffffff8211156112de576040516306dfcc6560e41b81526030600482015260248101839052604401610669565b5090565b5f6112eb610c6a565b6001805465ffffffffffff8616600160a01b026001600160d01b03199091166001600160a01b03881617179055915061132d90508165ffffffffffff16151590565b15610d66576040517f8886ebfc4259abdbc16601dd8fb5678e54878f47b3c34836cfc51154a9605109905f90a1505050565b5f80611369610bcd565b90508065ffffffffffff168365ffffffffffff16116113915761138c838261183c565b611269565b61126965ffffffffffff8416620697805f828218828410028218611269565b5f6113bb8383610abe565b611438575f838152602081815260408083206001600160a01b03861684529091529020805460ff191660011790556113f03390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a450600161059f565b505f61059f565b5f61144a8383610abe565b15611438575f838152602081815260408083206001600160a01b0386168085529252808320805460ff1916905551339286917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a450600161059f565b5f602082840312156114b8575f80fd5b81356001600160e01b031981168114611269575f80fd5b5f602082840312156114df575f80fd5b5035919050565b80356001600160a01b03811681146114fc575f80fd5b919050565b5f8060408385031215611512575f80fd5b82359150611522602084016114e6565b90509250929050565b5f806020838503121561153c575f80fd5b823567ffffffffffffffff80821115611553575f80fd5b818501915085601f830112611566575f80fd5b813581811115611574575f80fd5b8660208260071b8501011
115611588575f80fd5b60209290920196919550909350505050565b5f805f80608085870312156115ad575f80fd5b843593506115bd602086016114e6565b92506115cb604086016114e6565b9396929550929360600135925050565b5f602082840312156115eb575f80fd5b611269826114e6565b5f60208284031215611604575f80fd5b813565ffffffffffff81168114611269575f80fd5b5f60a08284031215611629575f80fd5b60405160a0810181811067ffffffffffffffff8211171561165857634e487b7160e01b5f52604160045260245ffd5b80604052508091508235815260208301356020820152604083013560408201526060830135606082015261168e608084016114e6565b60808201525092915050565b5f805f805f805f610140888a0312156116b1575f80fd5b6116bb8989611619565b965060a0880135955060c088013560ff811681146116d7575f80fd5b945060e08801359350610100880135925061012088013567ffffffffffffffff80821115611703575f80fd5b818a0191508a601f830112611716575f80fd5b813581811115611724575f80fd5b8b6020828501011115611735575f80fd5b60208301945080935050505092959891949750929550565b5f8060c0838503121561175e575f80fd5b6117688484611619565b9460a0939093013593505050565b634e487b7160e01b5f52603260045260245ffd5b634e487b7160e01b5f52601160045260245ffd5b8181038181111561059f5761059f61178a565b5f602082840312156117c1575f80fd5b81518015158114611269575f80fd5b60208152816020820152818360408301375f818301604090810191909152601f909201601f19160101919050565b65ffffffffffff81811683821601908082111561181d5761181d61178a565b5092915050565b5f600182016118355761183561178a565b5060010190565b65ffffffffffff82811682821603908082111561181d5761181d61178a56fea2646970667358221220111de8e40c8e2761ed9ab04f385dfef1dffcd646c5a270f4fc3dc0858a0d605764736f6c63430008190033","sourceMap":"281:7248:35:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;2632:39:34;2638:20;2660:10;2632:5;:39::i;:::-;281:7248:35;;2447:39:34;2453:20;2475:10;2447:5;:39::i;2667:219:23:-;;;;;;;;;;-1:-1:-1;2667:219:23;;;;;:::i;:::-;;:::i;:::-;;;470:14:38;;463:22;445:41;;433:2;418:18;2
667:219:23;;;;;;;;7766:108;;;;;;;;;;-1:-1:-1;7861:6:23;7766:108;;;671:14:38;659:27;;;641:46;;629:2;614:18;7766:108:23;497:196:38;10927:126:23;;;;;;;;;;;;;:::i;1478:47:35:-;;;;;;;;;;-1:-1:-1;1478:47:35;;;;;:::i;:::-;;;;;;;;;;;;;;;;;1029:25:38;;;1017:2;1002:18;1478:47:35;883:177:38;3810:120:21;;;;;;;;;;-1:-1:-1;3810:120:21;;;;;:::i;:::-;3875:7;3901:12;;;;;;;;;;:22;;;;3810:120;3198:265:23;;;;;;;;;;-1:-1:-1;3198:265:23;;;;;:::i;:::-;;:::i;4515:566::-;;;;;;;;;;-1:-1:-1;4515:566:23;;;;;:::i;:::-;;:::i;5794:881:34:-;;;;;;:::i;:::-;;:::i;3733:254::-;;;;;;:::i;:::-;;:::i;1256:66:35:-;;;;;;;;;;;;-1:-1:-1;;;1256:66:35;;8068:150:23;;;;;;;;;;-1:-1:-1;8068:150:23;;;;;:::i;:::-;;:::i;10296:145::-;;;;;;;;;;-1:-1:-1;10296:145:23;;;;;:::i;:::-;;:::i;5410:287:35:-;;;;;;;;;;-1:-1:-1;5410:287:35;;;;;:::i;:::-;;:::i;1708:55::-;;;;;;;;;;-1:-1:-1;1708:55:35;;;;;:::i;:::-;;;;;;;;;;;;;;6707:106:23;;;;;;;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;6786:20:23;6707:106;;;-1:-1:-1;;;;;5436:32:38;;;5418:51;;5406:2;5391:18;6707:106:23;5272:203:38;2942:93:23;;;;;;;;;;;;;:::i;2854:136:21:-;;;;;;;;;;-1:-1:-1;2854:136:21;;;;;:::i;:::-;;:::i;7432:261:23:-;;;;;;;;;;;;;:::i;:::-;;;;5660:14:38;5701:15;;;5683:34;;5753:15;;;;5748:2;5733:18;;5726:43;5623:18;7432:261:23;5480:295:38;2187:49:21;;;;;;;;;;-1:-1:-1;2187:49:21;2232:4;2187:49;;7068:459:35;;;;;;;;;;-1:-1:-1;7068:459:35;;;;;:::i;:::-;;:::i;7130:229:23:-;;;;;;;;;;;;;:::i;9146:344::-;;;;;;;;;;;;;:::i;6886:171::-;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;;;6281:32:38;;;6263:51;;6362:14;6350:27;;;6345:2;6330:18;;6323:55;6236:18;6886:171:23;6091:293:38;3563:267:23;;;;;;;;;;-1:-1:-1;3563:267:23;;;;;:::i;:::-;;:::i;8706:128::-;;;;;;;;;;;;;:::i;3056:160:34:-;;;;;;:::i;:::-;3149:60;;;6822:25:38;;;3199:9:34;6878:2:38;6863:18;;6856:34;-1:-1:-1;;;;;3149:60:34;;;3178:1;;3149:60;;6795:18:38;3149:60:34;;;;;;;3056:160;;:::o;2667:219:23:-;2752:4;-1:-1:-1;;;;;;2775:64:23;;-1:-1:-1;;;2775:64:23;;:104;;;2843:36;2867:11;2843:23;:36::i;:::-;2768:111;2667:219;-1:-1:-1;;2667:219:23:o;109
27:126::-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;11018:28:23::1;:26;:28::i;:::-;10927:126:::0;:::o;3198:265::-;3317:4;3313:104;;3366:40;;-1:-1:-1;;;3366:40:23;;;;;;;;;;;3313:104;3426:30;3442:4;3448:7;3426:15;:30::i;:::-;3198:265;;:::o;4515:566::-;4637:26;;:55;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;4667:25:23;;;6786:20;;4667:25;4637:55;4633:399;;;4709:23;4734:15;4753:21;:19;:21::i;:::-;4708:66;;-1:-1:-1;4708:66:23;-1:-1:-1;;;;;;4792:29:23;;;;;:58;;-1:-1:-1;14557:13:23;;;;4792:58;:91;;;-1:-1:-1;14785:15:23;14774:26;;;;4854:29;4792:91;4788:185;;;4910:48;;-1:-1:-1;;;4910:48:23;;671:14:38;659:27;;4910:48:23;;;641:46:38;614:18;;4910:48:23;;;;;;;;4788:185;-1:-1:-1;;4993:28:23;4986:35;;-1:-1:-1;;;;4986:35:23;;;4633:399;5041:33;5060:4;5066:7;5041:18;:33::i;5794:881:34:-;5895:9;5872:20;5914:755;5934:17;;;5914:755;;;6033:1;6006:6;;6013:1;6006:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6006:29:34;;6002:527;;6121:6;;6128:1;6121:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6113:37:34;:55;6151:6;;6158:1;6151:9;;;;;;;:::i;:::-;;;;;;:16;;;6113:55;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;6320:6;;6327:1;6320:9;;;;;;;:::i;:::-;;;;;;:16;;;6304:32;;;;;:::i;:::-;;;6002:527;;;6434:6;;6441:1;6434:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6427:36:34;;6464:10;6476:6;;6483:1;6476:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;6497:6;;6504:1;6497:9;;;;;;;:::i;:::-;6427:87;;-1:-1:-1;;;;;;6427:87:34;;;;;;;-1:-1:-1;;;;;7556:15:38;;;6427:87:34;;;7538:34:38;7608:15;;;;7588:18;;;7581:43;-1:-1:-1;6497:16:34;:9;;;;;:16;;7640:18:38;;;7633:34;7473:18;;6427:87:34;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;6002:527;6620:6;;6627:1;6620:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6567:91:34;6603:6;;6610:1;6603:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6567:91:34;;6578:6;;6585:1;6578:9;;;;;;;:::i;:::-;;;;;;:23;;;6641:6;;6648:1;6641:9;;;;;;;:::i;:::-;;;;;;:16;;;6567:91;;;;;;6822:25:38;;;6878:2;6863:18;;6856:34;6810:2;
6795:18;;6648:248;6567:91:34;;;;;;;;5953:3;;5914:755;;;;5862:813;5794:881;;:::o;3733:254::-;3852:61;;-1:-1:-1;;;3852:61:34;;3879:10;3852:61;;;7538:34:38;3899:4:34;7588:18:38;;;7581:43;7640:18;;;7633:34;;;-1:-1:-1;;;;;3852:26:34;;;;;7473:18:38;;3852:61:34;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;3956:15;-1:-1:-1;;;;;3928:52:34;3949:5;-1:-1:-1;;;;;3928:52:34;;3934:13;3973:6;3928:52;;;;;;6822:25:38;;;6878:2;6863:18;;6856:34;6810:2;6795:18;;6648:248;3928:52:34;;;;;;;;3733:254;;;;:::o;8068:150:23:-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;8175:36:23::1;8202:8;8175:26;:36::i;10296:145::-:0;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;10400:34:23::1;10425:8;10400:24;:34::i;5410:287:35:-:0;5611:44;5624:6;5632:13;5647:1;5650;5653;5611:12;:44::i;:::-;5670:20;5680:9;;5670:20;;;;;;;:::i;:::-;;;;;;;;5410:287;;;;;;;:::o;2942:93:23:-;2988:7;3014:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;3014:14;3007:21;;2942:93;:::o;2854:136:21:-;2931:4;2954:12;;;;;;;;;;;-1:-1:-1;;;;;2954:29:21;;;;;;;;;;;;;;;2854:136::o;7432:261:23:-;7552:21;;7497:15;;-1:-1:-1;;;7552:21:23;;;;14557:13;;;7591:57;;;;-1:-1:-1;14785:15:23;14774:26;;;;7619:29;7591:57;7590:96;;7681:1;7684;7590:96;;;7653:13;;-1:-1:-1;;;7653:13:23;;;;7668:8;7590:96;7583:103;;;;7432:261;;:::o;7068:459:35:-;7304:20;;7338:15;;;;;7367;;;;;7396:16;;;;;7426:20;;;;;7213:270;;-1:-1:-1;;;7213:270:35;;;8751:33:38;7277:13:35;8800:12:38;;;8793:28;8837:12;;;8830:28;;;;8874:12;;;8867:28;;;;8911:13;;;8904:29;;;;8949:13;;;8942:29;;;;9006:15;-1:-1:-1;;9002:53:38;8987:13;;;8980:76;9072:13;;;;9065:29;;;;7213:270:35;;;;;;;;;;9110:13:38;;;;7213:270:35;;;7502:18;;;;;;7068:459::o;7130:229:23:-;7224:21;;7188:6;;-1:-1:-1;;;7224:21:23;;;;14557:13;;;7263:56;;;;-1:-1:-1;14785:15:23;14774:26;;;;7291:28;7262:90;;7339:13;;-1:-1:-1;;;7339:13:23;;;;7262:90;;;7323:13;;-1:-1:-1;;;7323:13:23;;;;7262:90;7255:97;;;7130:229;:::o;9146:344::-;9210:23;9239:21;:19;:21::i;:::-;-1:-1:-1;9209:51:23;-1:-1:-1;735:10:27;-1:-1:-1;;;;;9274:31:23;;;9270:175;;9388
:46;;-1:-1:-1;;;9388:46:23;;735:10:27;9388:46:23;;;5418:51:38;5391:18;;9388:46:23;5272:203:38;9270:175:23;9454:29;:27;:29::i;6886:171::-;6999:20;;-1:-1:-1;;;;;6999:20:23;;;-1:-1:-1;;;7021:28:23;;;;;;6886:171::o;3563:267::-;3683:4;3679:104;;3732:40;;-1:-1:-1;;;3732:40:23;;;;;;;;;;;3679:104;3792:31;3809:4;3815:7;3792:16;:31::i;8706:128::-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;8798:29:23::1;:27;:29::i;2565:202:21:-:0;2650:4;-1:-1:-1;;;;;;2673:47:21;;-1:-1:-1;;;2673:47:21;;:87;;-1:-1:-1;;;;;;;;;;862:40:29;;;2724:36:21;763:146:29;3199:103:21;3265:30;3276:4;735:10:27;3265::21;:30::i;11180:94:23:-;11245:22;11262:1;11265;11245:16;:22::i;:::-;11180:94::o;4226:136:21:-;3875:7;3901:12;;;;;;;;;;:22;;;2464:16;2475:4;2464:10;:16::i;:::-;4330:25:::1;4341:4;4347:7;4330:10;:25::i;5328:245::-:0;-1:-1:-1;;;;;5421:34:21;;735:10:27;5421:34:21;5417:102;;5478:30;;-1:-1:-1;;;5478:30:21;;;;;;;;;;;5417:102;5529:37;5541:4;5547:18;5529:11;:37::i;:::-;;5328:245;;:::o;8345:288:23:-;8426:18;8484:19;:17;:19::i;:::-;8447:34;8465:15;8447:17;:34::i;:::-;:56;;;;:::i;:::-;8426:77;;8513:46;8537:8;8547:11;8513:23;:46::i;:::-;8574:52;;671:14:38;659:27;;641:46;;-1:-1:-1;;;;;8574:52:23;;;;;629:2:38;614:18;8574:52:23;;;;;;;8416:217;8345:288;:::o;10566:::-;10644:18;10702:26;10719:8;10702:16;:26::i;:::-;10665:34;10683:15;10665:17;:34::i;:::-;:63;;;;:::i;:::-;10644:84;;10738:39;10755:8;10765:11;10738:16;:39::i;:::-;10792:55;;;5660:14:38;5701:15;;;5683:34;;5753:15;;5748:2;5733:18;;5726:43;10792:55:23;;5623:18:38;10792:55:23;;;;;;;10634:220;10566:288;:::o;5703:1152:35:-;5931:20;;5894:21;5918:34;;;:12;:34;;;;;:36;;;5894:21;5918:36;;;:::i;:::-;;;;;5894:60;;5985:6;:15;;;5968:13;:32;5964:71;;6009:26;;-1:-1:-1;;;6009:26:35;;;;;1029:25:38;;;1002:18;;6009:26:35;883:177:38;5964:71:35;6121:6;:16;;;6103:15;:34;6099:61;;;6146:14;;-1:-1:-1;;;6146:14:35;;;;;;;;;;;6099:61;6232:19;6254:38;6270:6;6278:13;6254:15;:38::i;:::-;6322:31;;;6302:17;6322:31;;;;;;;;;9682:25:38;;;9755:4;9743:17;;9723:18;;;9716:45;;;;9777:18;;;9770:
34;;;9820:18;;;9813:34;;;6232:60:35;;-1:-1:-1;6302:17:35;6322:31;;9654:19:38;;6322:31:35;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;6302:51;;6440:34;-1:-1:-1;;;6464:9:35;6440:7;:34::i;:::-;6435:71;;6483:23;;-1:-1:-1;;;6483:23:35;;-1:-1:-1;;;;;5436:32:38;;6483:23:35;;;5418:51:38;5391:18;;6483:23:35;5272:203:38;6435:71:35;6621:20;;6600:42;;;;:20;:42;;;;;;6646:12;6600:58;;6596:99;;6667:28;;-1:-1:-1;;;6667:28:35;;;;;;;;;;;6596:99;6726:20;;6705:42;;;;:20;:42;;;;;;;6750:12;6705:57;;6800:48;;;6726:6;;10059:13:38;;10047:26;;10123:4;10111:17;;;10105:24;10089:14;;;10082:48;10178:2;10166:15;;;10160:22;10146:12;;;10139:44;10231:2;10219:15;;;10213:22;10199:12;;;10192:44;10289:3;10277:16;;;10271:23;-1:-1:-1;;;;;10267:49:38;10252:13;;;10245:72;;;;10304:3;10333:13;;9858:494;6800:48:35;;;;;;;;6815:9;-1:-1:-1;;;;;6800:48:35;;6834:13;6800:48;;;;1029:25:38;;1017:2;1002:18;;883:177;6800:48:35;;;;;;;;5815:1040;;;5703:1152;;;;;:::o;9618:474:23:-;9685:16;9703:15;9722:21;:19;:21::i;:::-;9684:59;;;;9758:24;9773:8;14557:13;;;;;14471:106;9758:24;9757:25;:58;;;-1:-1:-1;14785:15:23;14774:26;;;;9786:29;9757:58;9753:144;;;9838:48;;-1:-1:-1;;;9838:48:23;;671:14:38;659:27;;9838:48:23;;;641:46:38;614:18;;9838:48:23;497:196:38;9753:144:23;9906:47;2232:4:21;9938:14:23;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;9938:14;9906:11;:47::i;:::-;-1:-1:-1;9963:40:23;2232:4:21;9994:8:23;9963:10;:40::i;:::-;-1:-1:-1;;10020:20:23;10013:27;;-1:-1:-1;;;;;;10050:35:23;;;-1:-1:-1;9618:474:23:o;4642:138:21:-;3875:7;3901:12;;;;;;;;;;:22;;;2464:16;2475:4;2464:10;:16::i;:::-;4747:26:::1;4759:4;4765:7;4747:11;:26::i;8962:111:23:-:0;9028:38;9060:1;9064;9028:23;:38::i;3432:197:21:-;3520:22;3528:4;3534:7;3520;:22::i;:::-;3515:108;;3565:47;;-1:-1:-1;;;3565:47:21;;-1:-1:-1;;;;;10549:32:38;;3565:47:21;;;10531:51:38;10598:18;;;10591:34;;;10504:18;;3565:47:21;10357:274:38;13741:585:23;13843:21;;-1:-1:-1;;;13843:21:23;;;;14557:13;;13875:365;;14785:15;14774:26;;;;13922:308;;;14040:13;;14024;:29;;-1:-1:-1;;;;;14024:29:23;-1:-1:-1;;;14040:13:2
3;;;;;-1:-1:-1;;;14024:29:23;;;;;;;13922:308;;;14182:33;;;;;;;13922:308;-1:-1:-1;14250:13:23;:24;;-1:-1:-1;;;;;14284:35:23;-1:-1:-1;;;14250:24:23;;;;;-1:-1:-1;;;;;14284:35:23;;-1:-1:-1;;;14284:35:23;;;;;;;;;;;;;;13741:585::o;5509:370::-;5595:4;5615;5611:214;;5687:1;5661:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;5661:14;-1:-1:-1;;;;;5661:28:23;;5657:114;;5716:40;;-1:-1:-1;;;5716:40:23;;;;;;;;;;;5657:114;5784:20;:30;;-1:-1:-1;;;;;;5784:30:23;-1:-1:-1;;;;;5784:30:23;;;;;5611:214;5841:31;5858:4;5864:7;5841:16;:31::i;:::-;5834:38;5509:370;-1:-1:-1;;;5509:370:23:o;5946:271::-;6033:4;6053:26;;:55;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;6083:25:23;;;6786:20;;6083:25;6053:55;6049:113;;;6131:20;6124:27;;-1:-1:-1;;;;;;6124:27:23;;;6049:113;6178:32;6196:4;6202:7;6178:17;:32::i;14296:213:32:-;14352:6;14382:16;14374:24;;14370:103;;;14421:41;;-1:-1:-1;;;14421:41:32;;14452:2;14421:41;;;10817:36:38;10869:18;;;10862:34;;;10790:18;;14421:41:32;10636:266:38;14370:103:32;-1:-1:-1;14496:5:32;14296:213::o;13062:525:23:-;13154:18;13176:21;:19;:21::i;:::-;13208:20;:31;;13249:42;;;-1:-1:-1;;;13249:42:23;-1:-1:-1;;;;;;13249:42:23;;;-1:-1:-1;;;;;13208:31:23;;13249:42;;;;13151:46;-1:-1:-1;13403:27:23;;-1:-1:-1;13151:46:23;14557:13;;;;;14471:106;13403:27;13399:182;;;13540:30;;;;;;;13141:446;13062:525;;:::o;11621:1249::-;11695:6;11713:19;11735;:17;:19::i;:::-;11713:41;;12684:12;12673:23;;:8;:23;;;:190;;12840:23;12855:8;12840:12;:23;:::i;:::-;12673:190;;;12722:51;;;;7861:6;3429:7:31;3066:5;;;3463;;;3065:36;3060:42;;3455:20;2825:294;6179:316:21;6256:4;6277:22;6285:4;6291:7;6277;:22::i;:::-;6272:217;;6315:6;:12;;;;;;;;;;;-1:-1:-1;;;;;6315:29:21;;;;;;;;;:36;;-1:-1:-1;;6315:36:21;6347:4;6315:36;;;6397:12;735:10:27;;656:96;6397:12:21;-1:-1:-1;;;;;6370:40:21;6388:7;-1:-1:-1;;;;;6370:40:21;6382:4;6370:40;;;;;;;;;;-1:-1:-1;6431:4:21;6424:11;;6272:217;-1:-1:-1;6473:5:21;6466:12;;6730:317;6808:4;6828:22;6836:4;6842:7;6828;:22::i;:::-;6824:217;;;6898:5;6866:12;;;;;;;;;;;-1:-1:-1;;;;;6866:29:21;;;;;;;;;;:37;
;-1:-1:-1;;6866:37:21;;;6922:40;735:10:27;;6866:12:21;;6922:40;;6898:5;6922:40;-1:-1:-1;6983:4:21;6976:11;;14:286:38;72:6;125:2;113:9;104:7;100:23;96:32;93:52;;;141:1;138;131:12;93:52;167:23;;-1:-1:-1;;;;;;219:32:38;;209:43;;199:71;;266:1;263;256:12;698:180;757:6;810:2;798:9;789:7;785:23;781:32;778:52;;;826:1;823;816:12;778:52;-1:-1:-1;849:23:38;;698:180;-1:-1:-1;698:180:38:o;1432:173::-;1500:20;;-1:-1:-1;;;;;1549:31:38;;1539:42;;1529:70;;1595:1;1592;1585:12;1529:70;1432:173;;;:::o;1610:254::-;1678:6;1686;1739:2;1727:9;1718:7;1714:23;1710:32;1707:52;;;1755:1;1752;1745:12;1707:52;1791:9;1778:23;1768:33;;1820:38;1854:2;1843:9;1839:18;1820:38;:::i;:::-;1810:48;;1610:254;;;;;:::o;1869:645::-;1985:6;1993;2046:2;2034:9;2025:7;2021:23;2017:32;2014:52;;;2062:1;2059;2052:12;2014:52;2102:9;2089:23;2131:18;2172:2;2164:6;2161:14;2158:34;;;2188:1;2185;2178:12;2158:34;2226:6;2215:9;2211:22;2201:32;;2271:7;2264:4;2260:2;2256:13;2252:27;2242:55;;2293:1;2290;2283:12;2242:55;2333:2;2320:16;2359:2;2351:6;2348:14;2345:34;;;2375:1;2372;2365:12;2345:34;2428:7;2423:2;2413:6;2410:1;2406:14;2402:2;2398:23;2394:32;2391:45;2388:65;;;2449:1;2446;2439:12;2388:65;2480:2;2472:11;;;;;2502:6;;-1:-1:-1;1869:645:38;;-1:-1:-1;;;;1869:645:38:o;2519:397::-;2605:6;2613;2621;2629;2682:3;2670:9;2661:7;2657:23;2653:33;2650:53;;;2699:1;2696;2689:12;2650:53;2735:9;2722:23;2712:33;;2764:38;2798:2;2787:9;2783:18;2764:38;:::i;:::-;2754:48;;2821:38;2855:2;2844:9;2840:18;2821:38;:::i;:::-;2519:397;;;;-1:-1:-1;2811:48:38;;2906:2;2891:18;2878:32;;-1:-1:-1;;2519:397:38:o;2921:186::-;2980:6;3033:2;3021:9;3012:7;3008:23;3004:32;3001:52;;;3049:1;3046;3039:12;3001:52;3072:29;3091:9;3072:29;:::i;3112:280::-;3170:6;3223:2;3211:9;3202:7;3198:23;3194:32;3191:52;;;3239:1;3236;3229:12;3191:52;3278:9;3265:23;3328:14;3321:5;3317:26;3310:5;3307:37;3297:65;;3358:1;3355;3348:12;3397:779;3455:5;3503:4;3491:9;3486:3;3482:19;3478:30;3475:50;;;3521:1;3518;3511:12;3475:50;3554:2;3548:9;3596:4;3588:6;3584:17;3667:6;3655:10;3652:22;3631:1
8;3619:10;3616:34;3613:62;3610:185;;;3717:10;3712:3;3708:20;3705:1;3698:31;3752:4;3749:1;3742:15;3780:4;3777:1;3770:15;3610:185;3815:10;3811:2;3804:22;;3844:6;3835:15;;3887:9;3874:23;3866:6;3859:39;3959:2;3948:9;3944:18;3931:32;3926:2;3918:6;3914:15;3907:57;4025:2;4014:9;4010:18;3997:32;3992:2;3984:6;3980:15;3973:57;4091:2;4080:9;4076:18;4063:32;4058:2;4050:6;4046:15;4039:57;4130:39;4164:3;4153:9;4149:19;4130:39;:::i;:::-;4124:3;4116:6;4112:16;4105:65;;3397:779;;;;:::o;4181:1086::-;4324:6;4332;4340;4348;4356;4364;4372;4425:3;4413:9;4404:7;4400:23;4396:33;4393:53;;;4442:1;4439;4432:12;4393:53;4465:49;4506:7;4495:9;4465:49;:::i;:::-;4455:59;;4561:3;4550:9;4546:19;4533:33;4523:43;;4616:3;4605:9;4601:19;4588:33;4661:4;4654:5;4650:16;4643:5;4640:27;4630:55;;4681:1;4678;4671:12;4630:55;4704:5;-1:-1:-1;4756:3:38;4741:19;;4728:33;;-1:-1:-1;4808:3:38;4793:19;;4780:33;;-1:-1:-1;4864:3:38;4849:19;;4836:33;4888:18;4918:14;;;4915:34;;;4945:1;4942;4935:12;4915:34;4983:6;4972:9;4968:22;4958:32;;5028:7;5021:4;5017:2;5013:13;5009:27;4999:55;;5050:1;5047;5040:12;4999:55;5090:2;5077:16;5116:2;5108:6;5105:14;5102:34;;;5132:1;5129;5122:12;5102:34;5179:7;5172:4;5163:6;5159:2;5155:15;5151:26;5148:39;5145:59;;;5200:1;5197;5190:12;5145:59;5231:4;5227:2;5223:13;5213:23;;5255:6;5245:16;;;;;4181:1086;;;;;;;;;;:::o;5780:306::-;5878:6;5886;5939:3;5927:9;5918:7;5914:23;5910:33;5907:53;;;5956:1;5953;5946:12;5907:53;5979:49;6020:7;6009:9;5979:49;:::i;:::-;5969:59;6075:3;6060:19;;;;6047:33;;-1:-1:-1;;;5780:306:38:o;6901:127::-;6962:10;6957:3;6953:20;6950:1;6943:31;6993:4;6990:1;6983:15;7017:4;7014:1;7007:15;7033:127;7094:10;7089:3;7085:20;7082:1;7075:31;7125:4;7122:1;7115:15;7149:4;7146:1;7139:15;7165:128;7232:9;;;7253:11;;;7250:37;;;7267:18;;:::i;7678:277::-;7745:6;7798:2;7786:9;7777:7;7773:23;7769:32;7766:52;;;7814:1;7811;7804:12;7766:52;7846:9;7840:16;7899:5;7892:13;7885:21;7878:5;7875:32;7865:60;;7921:1;7918;7911:12;7960:388;8117:2;8106:9;8099:21;8156:6;8151:2;8140:9;8136:18;8129:34;8213:6;8205;
8200:2;8189:9;8185:18;8172:48;8269:1;8240:22;;;8264:2;8236:31;;;8229:42;;;;8332:2;8311:15;;;-1:-1:-1;;8307:29:38;8292:45;8288:54;;7960:388;-1:-1:-1;7960:388:38:o;9134:176::-;9201:14;9235:10;;;9247;;;9231:27;;9270:11;;;9267:37;;;9284:18;;:::i;:::-;9267:37;9134:176;;;;:::o;9315:135::-;9354:3;9375:17;;;9372:43;;9395:18;;:::i;:::-;-1:-1:-1;9442:1:38;9431:13;;9315:135::o;10907:179::-;10975:14;11022:10;;;11010;;;11006:27;;11045:12;;;11042:38;;;11060:18;;:::i","linkReferences":{},"immutableReferences":{"49726":[{"start":398,"length":32},{"start":442,"length":32}]}},"methodIdentifiers":{"DEFAULT_ADMIN_ROLE()":"a217fddf","SEQUENCER_ROLE()":"4842855c","acceptDefaultAdminTransfer()":"cefc1429","beginDefaultAdminTransfer(address)":"634e93da","blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":"c7bc4a62","cancelDefaultAdminTransfer()":"d602b9fd","changeDefaultAdminDelay(uint48)":"649a5ec7","defaultAdmin()":"84ef8ffc","defaultAdminDelay()":"cc8463c8","defaultAdminDelayIncreaseWait()":"022d63fb","enter(uint256,address)":"ea3b9ba1","enter(uint256,address,address,uint256)":"3805c6bd","fulfillExits((uint256,address,address,uint256)[])":"36702119","getRoleAdmin(bytes32)":"248a9ca3","grantRole(bytes32,address)":"2f2ff15d","hasRole(bytes32,address)":"91d14854","lastSubmittedAtBlock(uint256)":"7e82bb01","nextSequence(uint256)":"1e663720","owner()":"8da5cb5b","pendingDefaultAdmin()":"cf6eefb7","pendingDefaultAdminDelay()":"a1eda53c","renounceRole(bytes32,address)":"36568abe","revokeRole(bytes32,address)":"d547741f","rollbackDefaultAdminDelay()":"0aa6220b","submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":"7e569274","supportsInterface(bytes4)":"01ffc9a7"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.25+commit.b61c2a91\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"defaultRollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"addr
ess\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AccessControlBadConfirmation\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"name\":\"AccessControlEnforcedDefaultAdminDelay\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"AccessControlEnforcedDefaultAdminRules\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"defaultAdmin\",\"type\":\"address\"}],\"name\":\"AccessControlInvalidDefaultAdmin\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"neededRole\",\"type\":\"bytes32\"}],\"name\":\"AccessControlUnauthorizedAccount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"expected\",\"type\":\"uint256\"}],\"name\":\"BadSequence\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"derivedSequencer\",\"type\":\"address\"}],\"name\":\"BadSignature\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"BlockExpired\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OneRollupBlockPerHostBlock\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OrderExpired\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"bits\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"SafeCastOverflowedUintDowncast\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"blockData\",\"type\":\"bytes\"}],\"name\":\"BlockData\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sequencer\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"}
,{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"indexed\":true,\"internalType\":\"struct Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"}],\"name\":\"BlockSubmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"DefaultAdminDelayChangeCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"},{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"effectSchedule\",\"type\":\"uint48\"}],\"name\":\"DefaultAdminDelayChangeScheduled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"DefaultAdminTransferCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"acceptSchedule\",\"type\":\"uint48\"}],\"name\":\"DefaultAdminTransferScheduled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"rollupRecipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"Enter\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"hostRecipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",
\"type\":\"uint256\"}],\"name\":\"ExitFilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"previousAdminRole\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"newAdminRole\",\"type\":\"bytes32\"}],\"name\":\"RoleAdminChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RoleGranted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RoleRevoked\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"DEFAULT_ADMIN_ROLE\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SEQUENCER_ROLE\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"beginDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",
\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"internalType\":\"struct Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"}],\"name\":\"blockCommitment\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"commit\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"cancelDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"}],\"name\":\"changeDefaultAdminDelay\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdminDelay\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdminDelayIncreaseWait\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rollupRecipient\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"enter\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"na
me\":\"rollupRecipient\",\"type\":\"address\"}],\"name\":\"enter\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"internalType\":\"struct Passage.ExitOrder[]\",\"name\":\"orders\",\"type\":\"tuple[]\"}],\"name\":\"fulfillExits\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"}],\"name\":\"getRoleAdmin\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"grantRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"hasRole\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"lastSubmittedAtBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"nextSequence\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":
\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingDefaultAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"},{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingDefaultAdminDelay\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"},{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"renounceRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"revokeRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"rollbackDefaultAdminDelay\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"internalType\":\"struct 
Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint8\",\"name\":\"v\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"r\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"s\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blockData\",\"type\":\"bytes\"}],\"name\":\"submitBlock\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}],\"devdoc\":{\"errors\":{\"AccessControlBadConfirmation()\":[{\"details\":\"The caller of a function is not the expected one. NOTE: Don't confuse with {AccessControlUnauthorizedAccount}.\"}],\"AccessControlEnforcedDefaultAdminDelay(uint48)\":[{\"details\":\"The delay for transferring the default admin delay is enforced and the operation must wait until `schedule`. NOTE: `schedule` can be 0 indicating there's no transfer scheduled.\"}],\"AccessControlEnforcedDefaultAdminRules()\":[{\"details\":\"At least one of the following rules was violated: - The `DEFAULT_ADMIN_ROLE` must only be managed by itself. - The `DEFAULT_ADMIN_ROLE` must only be held by one account at the time. 
- Any `DEFAULT_ADMIN_ROLE` transfer must be in two delayed steps.\"}],\"AccessControlInvalidDefaultAdmin(address)\":[{\"details\":\"The new default admin is not a valid default admin.\"}],\"AccessControlUnauthorizedAccount(address,bytes32)\":[{\"details\":\"The `account` is missing a role.\"}],\"BadSequence(uint256)\":[{\"details\":\"Blocks must be submitted in strict monotonic increasing order.\",\"params\":{\"expected\":\"- the correct next sequence number for the given rollup chainId.\"}}],\"BadSignature(address)\":[{\"params\":{\"derivedSequencer\":\"- the derived signer of the block data that is not a permissioned sequencer.\"}}],\"SafeCastOverflowedUintDowncast(uint8,uint256)\":[{\"details\":\"Value doesn't fit in an uint of `bits` size.\"}]},\"events\":{\"BlockSubmitted(address,(uint256,uint256,uint256,uint256,address),bytes32)\":{\"params\":{\"blockDataHash\":\"- keccak256(blockData). the Node will discard the block if the hash doens't match.\",\"header\":\"- the block header information for the block.\",\"sequencer\":\"- the address of the sequencer that signed the block.\"}},\"DefaultAdminDelayChangeCanceled()\":{\"details\":\"Emitted when a {pendingDefaultAdminDelay} is reset if its schedule didn't pass.\"},\"DefaultAdminDelayChangeScheduled(uint48,uint48)\":{\"details\":\"Emitted when a {defaultAdminDelay} change is started, setting `newDelay` as the next delay to be applied between default admin transfer after `effectSchedule` has passed.\"},\"DefaultAdminTransferCanceled()\":{\"details\":\"Emitted when a {pendingDefaultAdmin} is reset if it was never accepted, regardless of its schedule.\"},\"DefaultAdminTransferScheduled(address,uint48)\":{\"details\":\"Emitted when a {defaultAdmin} transfer is started, setting `newAdmin` as the next address to become the {defaultAdmin} by calling {acceptDefaultAdminTransfer} only after `acceptSchedule` passes.\"},\"Enter(uint256,address,address,uint256)\":{\"params\":{\"amount\":\"- The amount of the token entering 
the rollup.\",\"rollupRecipient\":\"- The recipient of the token on the rollup.\",\"token\":\"- The address of the token entering the rollup.\"}},\"ExitFilled(uint256,address,address,uint256)\":{\"params\":{\"amount\":\"- The amount of the token transferred to the recipient.\",\"hostRecipient\":\"- The recipient of the token on host.\",\"token\":\"- The address of the token transferred to the recipient.\"}},\"RoleAdminChanged(bytes32,bytes32,bytes32)\":{\"details\":\"Emitted when `newAdminRole` is set as ``role``'s admin role, replacing `previousAdminRole` `DEFAULT_ADMIN_ROLE` is the starting admin for all roles, despite {RoleAdminChanged} not being emitted signaling this.\"},\"RoleGranted(bytes32,address,address)\":{\"details\":\"Emitted when `account` is granted `role`. `sender` is the account that originated the contract call. This account bears the admin role (for the granted role). Expected in cases where the role was granted using the internal {AccessControl-_grantRole}.\"},\"RoleRevoked(bytes32,address,address)\":{\"details\":\"Emitted when `account` is revoked `role`. `sender` is the account that originated the contract call: - if using `revokeRole`, it is the admin role bearer - if using `renounceRole`, it is the role bearer (i.e. `account`)\"}},\"kind\":\"dev\",\"methods\":{\"acceptDefaultAdminTransfer()\":{\"details\":\"Completes a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. After calling the function: - `DEFAULT_ADMIN_ROLE` should be granted to the caller. - `DEFAULT_ADMIN_ROLE` should be revoked from the previous holder. - {pendingDefaultAdmin} should be reset to zero values. Requirements: - Only can be called by the {pendingDefaultAdmin}'s `newAdmin`. - The {pendingDefaultAdmin}'s `acceptSchedule` should've passed.\"},\"beginDefaultAdminTransfer(address)\":{\"details\":\"Starts a {defaultAdmin} transfer by setting a {pendingDefaultAdmin} scheduled for acceptance after the current timestamp plus a {defaultAdminDelay}. 
Requirements: - Only can be called by the current {defaultAdmin}. Emits a DefaultAdminRoleChangeStarted event.\"},\"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)\":{\"params\":{\"header\":\"- the header information for the rollup block.\"},\"returns\":{\"commit\":\"- the hash of the encoded block details.\"}},\"cancelDefaultAdminTransfer()\":{\"details\":\"Cancels a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. A {pendingDefaultAdmin} not yet accepted can also be cancelled with this function. Requirements: - Only can be called by the current {defaultAdmin}. May emit a DefaultAdminTransferCanceled event.\"},\"changeDefaultAdminDelay(uint48)\":{\"details\":\"Initiates a {defaultAdminDelay} update by setting a {pendingDefaultAdminDelay} scheduled for getting into effect after the current timestamp plus a {defaultAdminDelay}. This function guarantees that any call to {beginDefaultAdminTransfer} done between the timestamp this method is called and the {pendingDefaultAdminDelay} effect schedule will use the current {defaultAdminDelay} set before calling. The {pendingDefaultAdminDelay}'s effect schedule is defined in a way that waiting until the schedule and then calling {beginDefaultAdminTransfer} with the new delay will take at least the same as another {defaultAdmin} complete transfer (including acceptance). The schedule is designed for two scenarios: - When the delay is changed for a larger one the schedule is `block.timestamp + newDelay` capped by {defaultAdminDelayIncreaseWait}. - When the delay is changed for a shorter one, the schedule is `block.timestamp + (current delay - new delay)`. A {pendingDefaultAdminDelay} that never got into effect will be canceled in favor of a new scheduled change. Requirements: - Only can be called by the current {defaultAdmin}. 
Emits a DefaultAdminDelayChangeScheduled event and may emit a DefaultAdminDelayChangeCanceled event.\"},\"constructor\":{\"details\":\"See `AccessControlDefaultAdminRules` for information on contract administration. - Admin role can grant and revoke Sequencer roles. - Admin role can be transferred via two-step process with a 1 day timelock.\",\"params\":{\"admin\":\"- the address that will be the initial admin.\"}},\"defaultAdmin()\":{\"details\":\"Returns the address of the current `DEFAULT_ADMIN_ROLE` holder.\"},\"defaultAdminDelay()\":{\"details\":\"Returns the delay required to schedule the acceptance of a {defaultAdmin} transfer started. This delay will be added to the current timestamp when calling {beginDefaultAdminTransfer} to set the acceptance schedule. NOTE: If a delay change has been scheduled, it will take effect as soon as the schedule passes, making this function returns the new delay. See {changeDefaultAdminDelay}.\"},\"defaultAdminDelayIncreaseWait()\":{\"details\":\"Maximum time in seconds for an increase to {defaultAdminDelay} (that is scheduled using {changeDefaultAdminDelay}) to take effect. Default to 5 days. When the {defaultAdminDelay} is scheduled to be increased, it goes into effect after the new delay has passed with the purpose of giving enough time for reverting any accidental change (i.e. using milliseconds instead of seconds) that may lock the contract. However, to avoid excessive schedules, the wait is capped by this function and it can be overrode for a custom {defaultAdminDelay} increase scheduling. IMPORTANT: Make sure to add a reasonable amount of time while overriding this value, otherwise, there's a risk of setting a high new delay that goes into effect almost immediately without the possibility of human intervention in the case of an input error (eg. 
set milliseconds instead of seconds).\"},\"enter(uint256,address)\":{\"custom:emits\":\"Enter indicating the amount of Ether to mint on the rollup & its recipient.\",\"details\":\"Permanently burns the entire msg.value by locking it in this contract.\",\"params\":{\"rollupChainId\":\"- The rollup chain to enter.\",\"rollupRecipient\":\"- The recipient of the Ether on the rollup.\"}},\"enter(uint256,address,address,uint256)\":{\"custom:emits\":\"Enter indicating the amount of tokens to mint on the rollup & its recipient.\",\"details\":\"Permanently burns the token amount by locking it in this contract.\",\"params\":{\"amount\":\"- The amount of the ERC20 token to transfer to the rollup.\",\"rollupChainId\":\"- The rollup chain to enter.\",\"rollupRecipient\":\"- The recipient of the Ether on the rollup.\",\"token\":\"- The address of the ERC20 token on the Host.\"}},\"fulfillExits((uint256,address,address,uint256)[])\":{\"custom:emits\":\"ExitFilled for each exit order fulfilled.\",\"details\":\"Builder SHOULD call `filfillExits` atomically with `submitBlock`. Builder SHOULD set a block expiration time that is AT MOST the minimum of all exit order deadlines; this way, `fulfillExits` + `submitBlock` will revert atomically on mainnet if any exit orders have expired. Otherwise, `filfillExits` may mine on mainnet, while `submitExit` reverts on the rollup, and the Builder can't collect the corresponding value on the rollup.Called by the Builder atomically with a transaction calling `submitBlock`. 
The user-submitted transactions initiating the ExitOrders on the rollup must be included by the Builder in the rollup block submitted via `submitBlock`.The user transfers tokenIn on the rollup, and receives tokenOut on host.The Builder receives tokenIn on the rollup, and transfers tokenOut to the user on host.The rollup STF MUST NOT apply `submitExit` transactions to the rollup state UNLESS a corresponding ExitFilled event is emitted on host in the same block.If the user submits multiple exit transactions for the same token in the same rollup block, the Builder may transfer the cumulative tokenOut to the user in a single ExitFilled event. The rollup STF will apply the user's exit transactions on the rollup up to the point that sum(tokenOut) is lte the ExitFilled amount. TODO: add option to fulfill ExitOrders with native ETH? or is it sufficient to only allow users to exit via WETH?\",\"params\":{\"orders\":\"The exit orders to fulfill\"}},\"getRoleAdmin(bytes32)\":{\"details\":\"Returns the admin role that controls `role`. See {grantRole} and {revokeRole}. To change a role's admin, use {_setRoleAdmin}.\"},\"grantRole(bytes32,address)\":{\"details\":\"See {AccessControl-grantRole}. Reverts for `DEFAULT_ADMIN_ROLE`.\"},\"hasRole(bytes32,address)\":{\"details\":\"Returns `true` if `account` has been granted `role`.\"},\"owner()\":{\"details\":\"See {IERC5313-owner}.\"},\"pendingDefaultAdmin()\":{\"details\":\"Returns a tuple of a `newAdmin` and an accept schedule. After the `schedule` passes, the `newAdmin` will be able to accept the {defaultAdmin} role by calling {acceptDefaultAdminTransfer}, completing the role transfer. A zero value only in `acceptSchedule` indicates no pending admin transfer. NOTE: A zero address `newAdmin` means that {defaultAdmin} is being renounced.\"},\"pendingDefaultAdminDelay()\":{\"details\":\"Returns a tuple of `newDelay` and an effect schedule. 
After the `schedule` passes, the `newDelay` will get into effect immediately for every new {defaultAdmin} transfer started with {beginDefaultAdminTransfer}. A zero value only in `effectSchedule` indicates no pending delay change. NOTE: A zero value only for `newDelay` means that the next {defaultAdminDelay} will be zero after the effect schedule.\"},\"renounceRole(bytes32,address)\":{\"details\":\"See {AccessControl-renounceRole}. For the `DEFAULT_ADMIN_ROLE`, it only allows renouncing in two steps by first calling {beginDefaultAdminTransfer} to the `address(0)`, so it's required that the {pendingDefaultAdmin} schedule has also passed when calling this function. After its execution, it will not be possible to call `onlyRole(DEFAULT_ADMIN_ROLE)` functions. NOTE: Renouncing `DEFAULT_ADMIN_ROLE` will leave the contract without a {defaultAdmin}, thereby disabling any functionality that is only available for it, and the possibility of reassigning a non-administrated role.\"},\"revokeRole(bytes32,address)\":{\"details\":\"See {AccessControl-revokeRole}. Reverts for `DEFAULT_ADMIN_ROLE`.\"},\"rollbackDefaultAdminDelay()\":{\"details\":\"Cancels a scheduled {defaultAdminDelay} change. Requirements: - Only can be called by the current {defaultAdmin}. 
May emit a DefaultAdminDelayChangeCanceled event.\"},\"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)\":{\"custom:emits\":\"BlockSubmitted if the block is successfully submitted.BlockData to expose the block calldata; as a convenience until calldata tracing is implemented in the Node.\",\"custom:reverts\":\"BadSequence if the sequence number is not the next block for the given rollup chainId.BlockExpired if the confirmBy time has passed.BadSignature if the signer is not a permissioned sequencer, OR if the signature provided commits to a different header.OneRollupBlockPerHostBlock if attempting to submit a second rollup block within one host block.\",\"details\":\"Blocks are submitted by Builders, with an attestation to the block data signed by a Sequencer.including blockDataHash allows the sequencer to sign over finalized block data, without needing to calldatacopy the `blockData` param.\",\"params\":{\"blockData\":\"- block data information. could be packed blob hashes, or direct rlp-encoded transctions. blockData is ignored by the contract logic.\",\"blockDataHash\":\"- keccak256(blockData). 
the Node will discard the block if the hash doens't match.\",\"header\":\"- the header information for the rollup block.\",\"r\":\"- the r component of the Sequencer's ECSDA signature over the block header.\",\"s\":\"- the s component of the Sequencer's ECSDA signature over the block header.\",\"v\":\"- the v component of the Sequencer's ECSDA signature over the block header.\"}},\"supportsInterface(bytes4)\":{\"details\":\"See {IERC165-supportsInterface}.\"}},\"version\":1},\"userdoc\":{\"errors\":{\"BadSequence(uint256)\":[{\"notice\":\"Thrown when a block submission is attempted with a sequence number that is not the next block for the rollup chainId.\"}],\"BadSignature(address)\":[{\"notice\":\"Thrown when a block submission is attempted with a signature by a non-permissioned sequencer, OR when signature is produced over different data than is provided.\"}],\"BlockExpired()\":[{\"notice\":\"Thrown when a block submission is attempted when the confirmBy time has passed.\"}],\"OneRollupBlockPerHostBlock()\":[{\"notice\":\"Thrown when attempting to submit more than one rollup block per host block\"}],\"OrderExpired()\":[{\"notice\":\"Thrown when attempting to fulfill an exit order with a deadline that has passed.\"}]},\"events\":{\"BlockData(bytes)\":{\"notice\":\"Emit the entire block data for easy visibility\"},\"BlockSubmitted(address,(uint256,uint256,uint256,uint256,address),bytes32)\":{\"notice\":\"Emitted when a new rollup block is successfully submitted.\"},\"Enter(uint256,address,address,uint256)\":{\"notice\":\"Emitted when tokens enter the rollup.\"},\"ExitFilled(uint256,address,address,uint256)\":{\"notice\":\"Emitted when an exit order is fulfilled by the Builder.\"}},\"kind\":\"user\",\"methods\":{\"SEQUENCER_ROLE()\":{\"notice\":\"Role that allows a key to sign commitments to rollup blocks.\"},\"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)\":{\"notice\":\"Construct hash of block details that the sequencer 
signs.\"},\"constructor\":{\"notice\":\"Initializes the Admin role.\"},\"enter(uint256,address)\":{\"notice\":\"Allows native Ether to enter the rollup.\"},\"enter(uint256,address,address,uint256)\":{\"notice\":\"Allows ERC20s to enter the rollup.\"},\"fulfillExits((uint256,address,address,uint256)[])\":{\"notice\":\"Fulfills exit orders by transferring tokenOut to the recipient\"},\"lastSubmittedAtBlock(uint256)\":{\"notice\":\"The host block number that a block was last submitted at for a given rollup chainId. rollupChainId => host blockNumber that block was last submitted at\"},\"nextSequence(uint256)\":{\"notice\":\"The sequence number of the next block that can be submitted for a given rollup chainId. rollupChainId => nextSequence number\"},\"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)\":{\"notice\":\"Submit a rollup block with block data submitted via calldata.\"}},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/Zenith.sol\":\"Zenith\"},\"evmVersion\":\"cancun\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[\":@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/\",\":ds-test/=lib/forge-std/lib/ds-test/src/\",\":erc4626-tests/=lib/openzeppelin-contracts/lib/erc4626-tests/\",\":forge-std/=lib/forge-std/src/\",\":openzeppelin-contracts/=lib/openzeppelin-contracts/\"]},\"sources\":{\"lib/openzeppelin-contracts/contracts/access/AccessControl.sol\":{\"keccak256\":\"0xa0e92d42942f4f57c5be50568dac11e9d00c93efcb458026e18d2d9b9b2e7308\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://46326c0bb1e296b67185e81c918e0b40501b8b6386165855df0a3f3c634b6a80\",\"dweb:/ipfs/QmTwyrDYtsxsk6pymJTK94PnEpzsmkpUxFuzEiakDopy4Z\"]},\"lib/openzeppelin-contracts/contracts/access/IAccessControl.sol\":{\"keccak256\":\"0xc503b1464e90b1cf79d81239f719f81c35ff646b17b638c87fe87a1d7bc5d94d\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://381076837654e98f1d5dfc3909a3ebb80
e2c86a97d662b507320701e09cb7a60\",\"dweb:/ipfs/QmWGwdWe9JWx2ae3n8EhWuY6ipWo6shVg9bct6y5og7v9Y\"]},\"lib/openzeppelin-contracts/contracts/access/extensions/AccessControlDefaultAdminRules.sol\":{\"keccak256\":\"0xd5e43578dce2678fbd458e1221dc37b20e983ecce4a314b422704f07d6015c5b\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://9ea4d9ae3392dc9db1ef4d7ebef84ce7fa243dc14abb46e68eb2eb60d2cd0e93\",\"dweb:/ipfs/QmRfjyDoLWF74EgmpcGkWZM7Kx1LgHN8dZHBxAnU9vPH46\"]},\"lib/openzeppelin-contracts/contracts/access/extensions/IAccessControlDefaultAdminRules.sol\":{\"keccak256\":\"0xc2dbeddf97707bf012827013b4a072bacbe56ad3219c405e30fd2a959e8a5413\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://281289e424c30c2ea92fc25598315117410404cf76a756663ad39ba18fd38b48\",\"dweb:/ipfs/Qma3wmq2cjxpfkKKM7JrvyJzzohsNWNNWsnaf3jVNBD65v\"]},\"lib/openzeppelin-contracts/contracts/interfaces/IERC5313.sol\":{\"keccak256\":\"0x22412c268e74cc3cbf550aecc2f7456f6ac40783058e219cfe09f26f4d396621\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://0b841021f25480424d2359de4869e60e77f790f52e8e85f07aa389543024b559\",\"dweb:/ipfs/QmV7U5ehV5xe3QrbE8ErxfWSSzK1T1dGeizXvYPjWpNDGq\"]},\"lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol\":{\"keccak256\":\"0xee2337af2dc162a973b4be6d3f7c16f06298259e0af48c5470d2839bfa8a22f4\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://30c476b4b2f405c1bb3f0bae15b006d129c80f1bfd9d0f2038160a3bb9745009\",\"dweb:/ipfs/Qmb3VcuDufv6xbHeVgksC4tHpc5gKYVqBEwjEXW72XzSvN\"]},\"lib/openzeppelin-contracts/contracts/utils/Context.sol\":{\"keccak256\":\"0x493033a8d1b176a037b2cc6a04dad01a5c157722049bbecf632ca876224dd4b2\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://6a708e8a5bdb1011c2c381c9a5cfd8a9a956d7d0a9dc1bd8bcdaf52f76ef2f12\",\"dweb:/ipfs/Qmax9WHBnVsZP46ZxEMNRQpLQnrdE4dK8LehML1Py8FowF\"]},\"lib/openzeppelin-contracts/contracts/utils/Panic.sol\":{\"keccak256\":\"0x29074fe5a74bb024c57b3570abf6c74d8bceed3438694d470fd0166a3ecd196a\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://f4f8435ccbc56e384f4cc9ac9ff491cf30a82f2beac00
e33ccc2cf8af3f77cc3\",\"dweb:/ipfs/QmUKJXxTe6nn1qfgnX8xbnboNNAPUuEmJyGqMZCKNiFBgn\"]},\"lib/openzeppelin-contracts/contracts/utils/introspection/ERC165.sol\":{\"keccak256\":\"0x6fac27fb1885a1d9fd2ce3f8fac4e44a6596ca4d44207c9ef2541ba8c941291e\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://2079378abdb36baec15c23bc2353b73a3d28d1d0610b436b0c1c4e6fa61d65c9\",\"dweb:/ipfs/QmVZkRFMzKW7sLaugKSTbMNnUBKWF3QDsoMi5uoQFyVMjf\"]},\"lib/openzeppelin-contracts/contracts/utils/introspection/IERC165.sol\":{\"keccak256\":\"0xc859863e3bda7ec3cddf6dafe2ffe91bcbe648d1395b856b839c32ee9617c44c\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://a9d5417888b873cf2225ed5d50b2a67be97c1504134a2a580512168d587ad82e\",\"dweb:/ipfs/QmNr5fTb2heFW658NZn7dDnofZgFvQTnNxKRJ3wdnR1skX\"]},\"lib/openzeppelin-contracts/contracts/utils/math/Math.sol\":{\"keccak256\":\"0x3233b02fcf2b20a41cce60a62e43c7e5a67a55b738ec1db842a82452e6aa170d\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://231c75d18bc6973533dfa7d58d2b97e504ca4e21d703a5c8b0ec31475e97db67\",\"dweb:/ipfs/QmPJ29HDuFceD1FDr4CnjYYtvaQ234wGAfojZpL3RXFG26\"]},\"lib/openzeppelin-contracts/contracts/utils/math/SafeCast.sol\":{\"keccak256\":\"0x8cd59334ed58b8884cd1f775afc9400db702e674e5d6a7a438c655b9de788d7e\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://99e62c7de7318f413b6352e3f2704ca23e7725ff144e43c8bd574d12dbf29047\",\"dweb:/ipfs/QmSEXG2rBx1VxU2uFTWdiChjDvA4osEY2mesjmoVeVhHko\"]},\"src/Passage.sol\":{\"keccak256\":\"0x81016c92006558f93c028e3d4f61ddad8ff870b956edaa19ad2ccd68ec5d292a\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://dc70a7d97b18e988ce9560f4fabbf9caea3c6178f64fab056b1cf63d27bef6c5\",\"dweb:/ipfs/QmeJDLqvLdhkbWfyLHdYUPoGz7XHWw3zpe8YTCMQE9MacX\"]},\"src/Zenith.sol\":{\"keccak256\":\"0x0febef21c15ebf62421e25337341a8a11a6dd5b5dc2e9ea967a2d4769469ecd6\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://405a8eb90f834ab216e96d40b8c1cfd98c4bc4e71399b09c04ef4123eb3bb1ab\",\"dweb:/ipfs/QmVakr7Upoe2tgU1jQSZUgXE1UASAuHh9kReZ2mfgCsdha\"]}},\"version\":1}","metadata":{"compil
er":{"version":"0.8.25+commit.b61c2a91"},"language":"Solidity","output":{"abi":[{"inputs":[{"internalType":"uint256","name":"defaultRollupChainId","type":"uint256"},{"internalType":"address","name":"admin","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"type":"error","name":"AccessControlBadConfirmation"},{"inputs":[{"internalType":"uint48","name":"schedule","type":"uint48"}],"type":"error","name":"AccessControlEnforcedDefaultAdminDelay"},{"inputs":[],"type":"error","name":"AccessControlEnforcedDefaultAdminRules"},{"inputs":[{"internalType":"address","name":"defaultAdmin","type":"address"}],"type":"error","name":"AccessControlInvalidDefaultAdmin"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"bytes32","name":"neededRole","type":"bytes32"}],"type":"error","name":"AccessControlUnauthorizedAccount"},{"inputs":[{"internalType":"uint256","name":"expected","type":"uint256"}],"type":"error","name":"BadSequence"},{"inputs":[{"internalType":"address","name":"derivedSequencer","type":"address"}],"type":"error","name":"BadSignature"},{"inputs":[],"type":"error","name":"BlockExpired"},{"inputs":[],"type":"error","name":"OneRollupBlockPerHostBlock"},{"inputs":[],"type":"error","name":"OrderExpired"},{"inputs":[{"internalType":"uint8","name":"bits","type":"uint8"},{"internalType":"uint256","name":"value","type":"uint256"}],"type":"error","name":"SafeCastOverflowedUintDowncast"},{"inputs":[{"internalType":"bytes","name":"blockData","type":"bytes","indexed":false}],"type":"event","name":"BlockData","anonymous":false},{"inputs":[{"internalType":"address","name":"sequencer","type":"address","indexed":true},{"internalType":"struct 
Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}],"indexed":true},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32","indexed":false}],"type":"event","name":"BlockSubmitted","anonymous":false},{"inputs":[],"type":"event","name":"DefaultAdminDelayChangeCanceled","anonymous":false},{"inputs":[{"internalType":"uint48","name":"newDelay","type":"uint48","indexed":false},{"internalType":"uint48","name":"effectSchedule","type":"uint48","indexed":false}],"type":"event","name":"DefaultAdminDelayChangeScheduled","anonymous":false},{"inputs":[],"type":"event","name":"DefaultAdminTransferCanceled","anonymous":false},{"inputs":[{"internalType":"address","name":"newAdmin","type":"address","indexed":true},{"internalType":"uint48","name":"acceptSchedule","type":"uint48","indexed":false}],"type":"event","name":"DefaultAdminTransferScheduled","anonymous":false},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256","indexed":false},{"internalType":"address","name":"token","type":"address","indexed":true},{"internalType":"address","name":"rollupRecipient","type":"address","indexed":true},{"internalType":"uint256","name":"amount","type":"uint256","indexed":false}],"type":"event","name":"Enter","anonymous":false},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256","indexed":false},{"internalType":"address","name":"token","type":"address","indexed":true},{"internalType":"address","name":"hostRecipient","type":"address","indexed":true},{"internalType":"uint256","name":"amount","type":"uint256","indexed":false}],"type":"event","name":"ExitFilled","anonymous":false},{"inputs":[{"internalType":"bytes
32","name":"role","type":"bytes32","indexed":true},{"internalType":"bytes32","name":"previousAdminRole","type":"bytes32","indexed":true},{"internalType":"bytes32","name":"newAdminRole","type":"bytes32","indexed":true}],"type":"event","name":"RoleAdminChanged","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32","indexed":true},{"internalType":"address","name":"account","type":"address","indexed":true},{"internalType":"address","name":"sender","type":"address","indexed":true}],"type":"event","name":"RoleGranted","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32","indexed":true},{"internalType":"address","name":"account","type":"address","indexed":true},{"internalType":"address","name":"sender","type":"address","indexed":true}],"type":"event","name":"RoleRevoked","anonymous":false},{"inputs":[],"stateMutability":"payable","type":"fallback"},{"inputs":[],"stateMutability":"view","type":"function","name":"DEFAULT_ADMIN_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SEQUENCER_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"acceptDefaultAdminTransfer"},{"inputs":[{"internalType":"address","name":"newAdmin","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"beginDefaultAdminTransfer"},{"inputs":[{"internalType":"struct 
Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}]},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32"}],"stateMutability":"view","type":"function","name":"blockCommitment","outputs":[{"internalType":"bytes32","name":"commit","type":"bytes32"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"cancelDefaultAdminTransfer"},{"inputs":[{"internalType":"uint48","name":"newDelay","type":"uint48"}],"stateMutability":"nonpayable","type":"function","name":"changeDefaultAdminDelay"},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdmin","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdminDelay","outputs":[{"internalType":"uint48","name":"","type":"uint48"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdminDelayIncreaseWait","outputs":[{"internalType":"uint48","name":"","type":"uint48"}]},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"rollupRecipient","type":"address"},{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"stateMutability":"payable","type":"function","name":"enter"},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"rollupRecipient","type":"address"}],"stateMutability":"payable","type":"function","name":"enter"},{"inputs":[{"internalType":"struct 
Passage.ExitOrder[]","name":"orders","type":"tuple[]","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"token","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}]}],"stateMutability":"payable","type":"function","name":"fulfillExits"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"stateMutability":"view","type":"function","name":"getRoleAdmin","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"grantRole"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"view","type":"function","name":"hasRole","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function","name":"lastSubmittedAtBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function","name":"nextSequence","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"pendingDefaultAdmin","outputs":[{"internalType":"address","name":"newAdmin","type":"address"},{"internalType":"uint48","name":"schedule","type":"uint48"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"pendingDefaultAdminDelay","outputs":[{"internalType":"uint48","name":"newDelay","type":"uint48"},{"internalType":"uint48","name":"s
chedule","type":"uint48"}]},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"renounceRole"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"revokeRole"},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"rollbackDefaultAdminDelay"},{"inputs":[{"internalType":"struct Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}]},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"},{"internalType":"bytes","name":"blockData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"submitBlock"},{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"stateMutability":"view","type":"function","name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[],"stateMutability":"payable","type":"receive"}],"devdoc":{"kind":"dev","methods":{"acceptDefaultAdminTransfer()":{"details":"Completes a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. After calling the function: - `DEFAULT_ADMIN_ROLE` should be granted to the caller. - `DEFAULT_ADMIN_ROLE` should be revoked from the previous holder. - {pendingDefaultAdmin} should be reset to zero values. 
Requirements: - Only can be called by the {pendingDefaultAdmin}'s `newAdmin`. - The {pendingDefaultAdmin}'s `acceptSchedule` should've passed."},"beginDefaultAdminTransfer(address)":{"details":"Starts a {defaultAdmin} transfer by setting a {pendingDefaultAdmin} scheduled for acceptance after the current timestamp plus a {defaultAdminDelay}. Requirements: - Only can be called by the current {defaultAdmin}. Emits a DefaultAdminRoleChangeStarted event."},"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":{"params":{"header":"- the header information for the rollup block."},"returns":{"commit":"- the hash of the encoded block details."}},"cancelDefaultAdminTransfer()":{"details":"Cancels a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. A {pendingDefaultAdmin} not yet accepted can also be cancelled with this function. Requirements: - Only can be called by the current {defaultAdmin}. May emit a DefaultAdminTransferCanceled event."},"changeDefaultAdminDelay(uint48)":{"details":"Initiates a {defaultAdminDelay} update by setting a {pendingDefaultAdminDelay} scheduled for getting into effect after the current timestamp plus a {defaultAdminDelay}. This function guarantees that any call to {beginDefaultAdminTransfer} done between the timestamp this method is called and the {pendingDefaultAdminDelay} effect schedule will use the current {defaultAdminDelay} set before calling. The {pendingDefaultAdminDelay}'s effect schedule is defined in a way that waiting until the schedule and then calling {beginDefaultAdminTransfer} with the new delay will take at least the same as another {defaultAdmin} complete transfer (including acceptance). The schedule is designed for two scenarios: - When the delay is changed for a larger one the schedule is `block.timestamp + newDelay` capped by {defaultAdminDelayIncreaseWait}. - When the delay is changed for a shorter one, the schedule is `block.timestamp + (current delay - new delay)`. 
A {pendingDefaultAdminDelay} that never got into effect will be canceled in favor of a new scheduled change. Requirements: - Only can be called by the current {defaultAdmin}. Emits a DefaultAdminDelayChangeScheduled event and may emit a DefaultAdminDelayChangeCanceled event."},"constructor":{"details":"See `AccessControlDefaultAdminRules` for information on contract administration. - Admin role can grant and revoke Sequencer roles. - Admin role can be transferred via two-step process with a 1 day timelock.","params":{"admin":"- the address that will be the initial admin."}},"defaultAdmin()":{"details":"Returns the address of the current `DEFAULT_ADMIN_ROLE` holder."},"defaultAdminDelay()":{"details":"Returns the delay required to schedule the acceptance of a {defaultAdmin} transfer started. This delay will be added to the current timestamp when calling {beginDefaultAdminTransfer} to set the acceptance schedule. NOTE: If a delay change has been scheduled, it will take effect as soon as the schedule passes, making this function returns the new delay. See {changeDefaultAdminDelay}."},"defaultAdminDelayIncreaseWait()":{"details":"Maximum time in seconds for an increase to {defaultAdminDelay} (that is scheduled using {changeDefaultAdminDelay}) to take effect. Default to 5 days. When the {defaultAdminDelay} is scheduled to be increased, it goes into effect after the new delay has passed with the purpose of giving enough time for reverting any accidental change (i.e. using milliseconds instead of seconds) that may lock the contract. However, to avoid excessive schedules, the wait is capped by this function and it can be overrode for a custom {defaultAdminDelay} increase scheduling. IMPORTANT: Make sure to add a reasonable amount of time while overriding this value, otherwise, there's a risk of setting a high new delay that goes into effect almost immediately without the possibility of human intervention in the case of an input error (eg. 
set milliseconds instead of seconds)."},"enter(uint256,address)":{"custom:emits":"Enter indicating the amount of Ether to mint on the rollup & its recipient.","details":"Permanently burns the entire msg.value by locking it in this contract.","params":{"rollupChainId":"- The rollup chain to enter.","rollupRecipient":"- The recipient of the Ether on the rollup."}},"enter(uint256,address,address,uint256)":{"custom:emits":"Enter indicating the amount of tokens to mint on the rollup & its recipient.","details":"Permanently burns the token amount by locking it in this contract.","params":{"amount":"- The amount of the ERC20 token to transfer to the rollup.","rollupChainId":"- The rollup chain to enter.","rollupRecipient":"- The recipient of the Ether on the rollup.","token":"- The address of the ERC20 token on the Host."}},"fulfillExits((uint256,address,address,uint256)[])":{"custom:emits":"ExitFilled for each exit order fulfilled.","details":"Builder SHOULD call `filfillExits` atomically with `submitBlock`. Builder SHOULD set a block expiration time that is AT MOST the minimum of all exit order deadlines; this way, `fulfillExits` + `submitBlock` will revert atomically on mainnet if any exit orders have expired. Otherwise, `filfillExits` may mine on mainnet, while `submitExit` reverts on the rollup, and the Builder can't collect the corresponding value on the rollup.Called by the Builder atomically with a transaction calling `submitBlock`. 
The user-submitted transactions initiating the ExitOrders on the rollup must be included by the Builder in the rollup block submitted via `submitBlock`.The user transfers tokenIn on the rollup, and receives tokenOut on host.The Builder receives tokenIn on the rollup, and transfers tokenOut to the user on host.The rollup STF MUST NOT apply `submitExit` transactions to the rollup state UNLESS a corresponding ExitFilled event is emitted on host in the same block.If the user submits multiple exit transactions for the same token in the same rollup block, the Builder may transfer the cumulative tokenOut to the user in a single ExitFilled event. The rollup STF will apply the user's exit transactions on the rollup up to the point that sum(tokenOut) is lte the ExitFilled amount. TODO: add option to fulfill ExitOrders with native ETH? or is it sufficient to only allow users to exit via WETH?","params":{"orders":"The exit orders to fulfill"}},"getRoleAdmin(bytes32)":{"details":"Returns the admin role that controls `role`. See {grantRole} and {revokeRole}. To change a role's admin, use {_setRoleAdmin}."},"grantRole(bytes32,address)":{"details":"See {AccessControl-grantRole}. Reverts for `DEFAULT_ADMIN_ROLE`."},"hasRole(bytes32,address)":{"details":"Returns `true` if `account` has been granted `role`."},"owner()":{"details":"See {IERC5313-owner}."},"pendingDefaultAdmin()":{"details":"Returns a tuple of a `newAdmin` and an accept schedule. After the `schedule` passes, the `newAdmin` will be able to accept the {defaultAdmin} role by calling {acceptDefaultAdminTransfer}, completing the role transfer. A zero value only in `acceptSchedule` indicates no pending admin transfer. NOTE: A zero address `newAdmin` means that {defaultAdmin} is being renounced."},"pendingDefaultAdminDelay()":{"details":"Returns a tuple of `newDelay` and an effect schedule. 
After the `schedule` passes, the `newDelay` will get into effect immediately for every new {defaultAdmin} transfer started with {beginDefaultAdminTransfer}. A zero value only in `effectSchedule` indicates no pending delay change. NOTE: A zero value only for `newDelay` means that the next {defaultAdminDelay} will be zero after the effect schedule."},"renounceRole(bytes32,address)":{"details":"See {AccessControl-renounceRole}. For the `DEFAULT_ADMIN_ROLE`, it only allows renouncing in two steps by first calling {beginDefaultAdminTransfer} to the `address(0)`, so it's required that the {pendingDefaultAdmin} schedule has also passed when calling this function. After its execution, it will not be possible to call `onlyRole(DEFAULT_ADMIN_ROLE)` functions. NOTE: Renouncing `DEFAULT_ADMIN_ROLE` will leave the contract without a {defaultAdmin}, thereby disabling any functionality that is only available for it, and the possibility of reassigning a non-administrated role."},"revokeRole(bytes32,address)":{"details":"See {AccessControl-revokeRole}. Reverts for `DEFAULT_ADMIN_ROLE`."},"rollbackDefaultAdminDelay()":{"details":"Cancels a scheduled {defaultAdminDelay} change. Requirements: - Only can be called by the current {defaultAdmin}. 
May emit a DefaultAdminDelayChangeCanceled event."},"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":{"custom:emits":"BlockSubmitted if the block is successfully submitted.BlockData to expose the block calldata; as a convenience until calldata tracing is implemented in the Node.","custom:reverts":"BadSequence if the sequence number is not the next block for the given rollup chainId.BlockExpired if the confirmBy time has passed.BadSignature if the signer is not a permissioned sequencer, OR if the signature provided commits to a different header.OneRollupBlockPerHostBlock if attempting to submit a second rollup block within one host block.","details":"Blocks are submitted by Builders, with an attestation to the block data signed by a Sequencer.including blockDataHash allows the sequencer to sign over finalized block data, without needing to calldatacopy the `blockData` param.","params":{"blockData":"- block data information. could be packed blob hashes, or direct rlp-encoded transctions. blockData is ignored by the contract logic.","blockDataHash":"- keccak256(blockData). 
the Node will discard the block if the hash doens't match.","header":"- the header information for the rollup block.","r":"- the r component of the Sequencer's ECSDA signature over the block header.","s":"- the s component of the Sequencer's ECSDA signature over the block header.","v":"- the v component of the Sequencer's ECSDA signature over the block header."}},"supportsInterface(bytes4)":{"details":"See {IERC165-supportsInterface}."}},"version":1},"userdoc":{"kind":"user","methods":{"SEQUENCER_ROLE()":{"notice":"Role that allows a key to sign commitments to rollup blocks."},"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":{"notice":"Construct hash of block details that the sequencer signs."},"constructor":{"notice":"Initializes the Admin role."},"enter(uint256,address)":{"notice":"Allows native Ether to enter the rollup."},"enter(uint256,address,address,uint256)":{"notice":"Allows ERC20s to enter the rollup."},"fulfillExits((uint256,address,address,uint256)[])":{"notice":"Fulfills exit orders by transferring tokenOut to the recipient"},"lastSubmittedAtBlock(uint256)":{"notice":"The host block number that a block was last submitted at for a given rollup chainId. rollupChainId => host blockNumber that block was last submitted at"},"nextSequence(uint256)":{"notice":"The sequence number of the next block that can be submitted for a given rollup chainId. 
rollupChainId => nextSequence number"},"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":{"notice":"Submit a rollup block with block data submitted via calldata."}},"version":1}},"settings":{"remappings":["@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/","ds-test/=lib/forge-std/lib/ds-test/src/","erc4626-tests/=lib/openzeppelin-contracts/lib/erc4626-tests/","forge-std/=lib/forge-std/src/","openzeppelin-contracts/=lib/openzeppelin-contracts/"],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/Zenith.sol":"Zenith"},"evmVersion":"cancun","libraries":{}},"sources":{"lib/openzeppelin-contracts/contracts/access/AccessControl.sol":{"keccak256":"0xa0e92d42942f4f57c5be50568dac11e9d00c93efcb458026e18d2d9b9b2e7308","urls":["bzz-raw://46326c0bb1e296b67185e81c918e0b40501b8b6386165855df0a3f3c634b6a80","dweb:/ipfs/QmTwyrDYtsxsk6pymJTK94PnEpzsmkpUxFuzEiakDopy4Z"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/IAccessControl.sol":{"keccak256":"0xc503b1464e90b1cf79d81239f719f81c35ff646b17b638c87fe87a1d7bc5d94d","urls":["bzz-raw://381076837654e98f1d5dfc3909a3ebb80e2c86a97d662b507320701e09cb7a60","dweb:/ipfs/QmWGwdWe9JWx2ae3n8EhWuY6ipWo6shVg9bct6y5og7v9Y"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/extensions/AccessControlDefaultAdminRules.sol":{"keccak256":"0xd5e43578dce2678fbd458e1221dc37b20e983ecce4a314b422704f07d6015c5b","urls":["bzz-raw://9ea4d9ae3392dc9db1ef4d7ebef84ce7fa243dc14abb46e68eb2eb60d2cd0e93","dweb:/ipfs/QmRfjyDoLWF74EgmpcGkWZM7Kx1LgHN8dZHBxAnU9vPH46"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/extensions/IAccessControlDefaultAdminRules.sol":{"keccak256":"0xc2dbeddf97707bf012827013b4a072bacbe56ad3219c405e30fd2a959e8a5413","urls":["bzz-raw://281289e424c30c2ea92fc25598315117410404cf76a756663ad39ba18fd38b48","dweb:/ipfs/Qma3wmq2cjxpfkKKM7JrvyJzzohsNWNNWsnaf3jVNBD65v"],"license":"MIT"},"lib/openzeppelin-contracts/c
ontracts/interfaces/IERC5313.sol":{"keccak256":"0x22412c268e74cc3cbf550aecc2f7456f6ac40783058e219cfe09f26f4d396621","urls":["bzz-raw://0b841021f25480424d2359de4869e60e77f790f52e8e85f07aa389543024b559","dweb:/ipfs/QmV7U5ehV5xe3QrbE8ErxfWSSzK1T1dGeizXvYPjWpNDGq"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol":{"keccak256":"0xee2337af2dc162a973b4be6d3f7c16f06298259e0af48c5470d2839bfa8a22f4","urls":["bzz-raw://30c476b4b2f405c1bb3f0bae15b006d129c80f1bfd9d0f2038160a3bb9745009","dweb:/ipfs/Qmb3VcuDufv6xbHeVgksC4tHpc5gKYVqBEwjEXW72XzSvN"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Context.sol":{"keccak256":"0x493033a8d1b176a037b2cc6a04dad01a5c157722049bbecf632ca876224dd4b2","urls":["bzz-raw://6a708e8a5bdb1011c2c381c9a5cfd8a9a956d7d0a9dc1bd8bcdaf52f76ef2f12","dweb:/ipfs/Qmax9WHBnVsZP46ZxEMNRQpLQnrdE4dK8LehML1Py8FowF"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Panic.sol":{"keccak256":"0x29074fe5a74bb024c57b3570abf6c74d8bceed3438694d470fd0166a3ecd196a","urls":["bzz-raw://f4f8435ccbc56e384f4cc9ac9ff491cf30a82f2beac00e33ccc2cf8af3f77cc3","dweb:/ipfs/QmUKJXxTe6nn1qfgnX8xbnboNNAPUuEmJyGqMZCKNiFBgn"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/introspection/ERC165.sol":{"keccak256":"0x6fac27fb1885a1d9fd2ce3f8fac4e44a6596ca4d44207c9ef2541ba8c941291e","urls":["bzz-raw://2079378abdb36baec15c23bc2353b73a3d28d1d0610b436b0c1c4e6fa61d65c9","dweb:/ipfs/QmVZkRFMzKW7sLaugKSTbMNnUBKWF3QDsoMi5uoQFyVMjf"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/introspection/IERC165.sol":{"keccak256":"0xc859863e3bda7ec3cddf6dafe2ffe91bcbe648d1395b856b839c32ee9617c44c","urls":["bzz-raw://a9d5417888b873cf2225ed5d50b2a67be97c1504134a2a580512168d587ad82e","dweb:/ipfs/QmNr5fTb2heFW658NZn7dDnofZgFvQTnNxKRJ3wdnR1skX"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/Math.sol":{"keccak256":"0x3233b02fcf2b20a41cce60a62e43c7e5a67a55b738ec1db842a82452e6aa170d","urls":["bzz-raw://231c75d18bc6973533dfa
7d58d2b97e504ca4e21d703a5c8b0ec31475e97db67","dweb:/ipfs/QmPJ29HDuFceD1FDr4CnjYYtvaQ234wGAfojZpL3RXFG26"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/SafeCast.sol":{"keccak256":"0x8cd59334ed58b8884cd1f775afc9400db702e674e5d6a7a438c655b9de788d7e","urls":["bzz-raw://99e62c7de7318f413b6352e3f2704ca23e7725ff144e43c8bd574d12dbf29047","dweb:/ipfs/QmSEXG2rBx1VxU2uFTWdiChjDvA4osEY2mesjmoVeVhHko"],"license":"MIT"},"src/Passage.sol":{"keccak256":"0x81016c92006558f93c028e3d4f61ddad8ff870b956edaa19ad2ccd68ec5d292a","urls":["bzz-raw://dc70a7d97b18e988ce9560f4fabbf9caea3c6178f64fab056b1cf63d27bef6c5","dweb:/ipfs/QmeJDLqvLdhkbWfyLHdYUPoGz7XHWw3zpe8YTCMQE9MacX"],"license":"UNLICENSED"},"src/Zenith.sol":{"keccak256":"0x0febef21c15ebf62421e25337341a8a11a6dd5b5dc2e9ea967a2d4769469ecd6","urls":["bzz-raw://405a8eb90f834ab216e96d40b8c1cfd98c4bc4e71399b09c04ef4123eb3bb1ab","dweb:/ipfs/QmVakr7Upoe2tgU1jQSZUgXE1UASAuHh9kReZ2mfgCsdha"],"license":"UNLICENSED"}},"version":1},"id":35} \ No newline at end of file diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs new file mode 100644 index 000000000..98a8e378c --- /dev/null +++ b/examples/exex/rollup/src/execution.rs @@ -0,0 +1,488 @@ +use alloy_consensus::{SidecarCoder, SimpleCoder}; +use alloy_rlp::Decodable as _; +use eyre::OptionExt; +use reth::transaction_pool::TransactionPool; +use reth_interfaces::executor::BlockValidationError; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; +use reth_node_ethereum::EthEvmConfig; +use reth_primitives::{ + constants, + eip4844::kzg_to_versioned_hash, + keccak256, + revm::env::fill_tx_env, + revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState}, + Address, Block, BlockWithSenders, Bytes, Hardfork, Header, Receipt, TransactionSigned, TxType, + B256, U256, +}; +use reth_revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + DBBox, DatabaseCommit, Evm, StateBuilder, StateDBBox, +}; +use 
reth_tracing::tracing::debug; + +use crate::{db::Database, RollupContract, CHAIN_ID, CHAIN_SPEC}; + +/// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle +/// state)[BundleState] and list of (receipts)[Receipt]. +pub async fn execute_block( + db: &mut Database, + pool: &Pool, + tx: &TransactionSigned, + header: &RollupContract::BlockHeader, + block_data: Bytes, + block_data_hash: B256, +) -> eyre::Result<(BlockWithSenders, BundleState, Vec, Vec)> { + if header.rollupChainId != U256::from(CHAIN_ID) { + eyre::bail!("Invalid rollup chain ID") + } + + // Construct header + let header = construct_header(db, header)?; + + // Decode transactions + let transactions = decode_transactions(pool, tx, block_data, block_data_hash).await?; + + // Configure EVM + let evm_config = EthEvmConfig::default(); + let mut evm = configure_evm(&evm_config, db, &header); + + // Execute transactions + let (executed_txs, receipts, results) = execute_transactions(&mut evm, &header, transactions)?; + + // Construct block and recover senders + let block = Block { header, body: executed_txs, ..Default::default() } + .with_recovered_senders() + .ok_or_eyre("failed to recover senders")?; + + let bundle = evm.db_mut().take_bundle(); + + Ok((block, bundle, receipts, results)) +} + +/// Construct header from the given rollup header. +fn construct_header(db: &Database, header: &RollupContract::BlockHeader) -> eyre::Result
{ + let parent_block = if !header.sequence.is_zero() { + db.get_block(header.sequence - U256::from(1))? + } else { + None + }; + + let block_number = u64::try_from(header.sequence)?; + + // Calculate base fee per gas for EIP-1559 transactions + let base_fee_per_gas = if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) { + constants::EIP1559_INITIAL_BASE_FEE + } else { + parent_block + .as_ref() + .ok_or(eyre::eyre!("parent block not found"))? + .header + .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number)) + .ok_or(eyre::eyre!("failed to calculate base fee"))? + }; + + // Construct header + Ok(Header { + parent_hash: parent_block.map(|block| block.header.hash()).unwrap_or_default(), + number: block_number, + gas_limit: u64::try_from(header.gasLimit)?, + timestamp: u64::try_from(header.confirmBy)?, + base_fee_per_gas: Some(base_fee_per_gas), + ..Default::default() + }) +} + +/// Configure EVM with the given database and header. +fn configure_evm<'a>( + config: &'a EthEvmConfig, + db: &'a mut Database, + header: &Header, +) -> Evm<'a, (), StateDBBox<'a, eyre::Report>> { + let mut evm = config.evm( + StateBuilder::new_with_database(Box::new(db) as DBBox<'_, eyre::Report>) + .with_bundle_update() + .build(), + ); + evm.db_mut().set_state_clear_flag( + CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number), + ); + + let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id()); + EthEvmConfig::fill_cfg_and_block_env( + &mut cfg, + evm.block_mut(), + &CHAIN_SPEC, + header, + U256::ZERO, + ); + *evm.cfg_mut() = cfg.cfg_env; + + evm +} + +/// Decode transactions from the block data and recover senders. +/// - If the transaction is a blob-carrying one, decode the blobs either using the local transaction +/// pool, or querying Blobscan. +/// - If the transaction is a regular one, decode the block data directly. 
+async fn decode_transactions( + pool: &Pool, + tx: &TransactionSigned, + block_data: Bytes, + block_data_hash: B256, +) -> eyre::Result> { + // Get raw transactions either from the blobs, or directly from the block data + let raw_transactions = if matches!(tx.tx_type(), TxType::Eip4844) { + let blobs: Vec<_> = if let Some(sidecar) = pool.get_blob(tx.hash)? { + // Try to get blobs from the transaction pool + sidecar.blobs.into_iter().zip(sidecar.commitments).collect() + } else { + // If transaction is not found in the pool, try to get blobs from Blobscan + let blobscan_client = foundry_blob_explorers::Client::holesky(); + let sidecar = blobscan_client.transaction(tx.hash).await?.blob_sidecar(); + sidecar + .blobs + .into_iter() + .map(|blob| (*blob).into()) + .zip(sidecar.commitments.into_iter().map(|commitment| (*commitment).into())) + .collect() + }; + + // Decode blob hashes from block data + let blob_hashes = Vec::::decode(&mut block_data.as_ref())?; + + // Filter blobs that are present in the block data + let blobs = blobs + .into_iter() + // Convert blob KZG commitments to versioned hashes + .map(|(blob, commitment)| (blob, kzg_to_versioned_hash((*commitment).into()))) + // Filter only blobs that are present in the block data + .filter(|(_, hash)| blob_hashes.contains(hash)) + .map(|(blob, _)| blob) + .collect::>(); + if blobs.len() != blob_hashes.len() { + eyre::bail!("some blobs not found") + } + + // Decode blobs and concatenate them to get the raw transactions + let data = SimpleCoder::default() + .decode_all(&blobs) + .ok_or(eyre::eyre!("failed to decode blobs"))? + .concat(); + + data.into() + } else { + block_data + }; + + let raw_transaction_hash = keccak256(&raw_transactions); + if raw_transaction_hash != block_data_hash { + eyre::bail!("block data hash mismatch") + } + + // Decode block data, filter only transactions with the correct chain ID and recover senders + let transactions = Vec::::decode(&mut raw_transactions.as_ref())? 
+ .into_iter() + .filter(|tx| tx.chain_id() == Some(CHAIN_ID)) + .map(|tx| { + let sender = tx.recover_signer().ok_or(eyre::eyre!("failed to recover signer"))?; + Ok((tx, sender)) + }) + .collect::>()?; + + Ok(transactions) +} + +/// Execute transactions and return the list of executed transactions, receipts and +/// execution results. +fn execute_transactions( + evm: &mut Evm<'_, (), StateDBBox<'_, eyre::Report>>, + header: &Header, + transactions: Vec<(TransactionSigned, Address)>, +) -> eyre::Result<(Vec, Vec, Vec)> { + let mut receipts = Vec::with_capacity(transactions.len()); + let mut executed_txs = Vec::with_capacity(transactions.len()); + let mut results = Vec::with_capacity(transactions.len()); + if !transactions.is_empty() { + let mut cumulative_gas_used = 0; + for (transaction, sender) in transactions { + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + // Execute transaction. + // Fill revm structure. 
+ fill_tx_env(evm.tx_mut(), &transaction, sender); + + let ResultAndState { result, state } = match evm.transact() { + Ok(result) => result, + Err(err) => { + match err { + EVMError::Transaction(err) => { + // if the transaction is invalid, we can skip it + debug!(%err, ?transaction, "Skipping invalid transaction"); + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + eyre::bail!(err) + } + } + } + }; + + debug!(?transaction, ?result, ?state, "Executed transaction"); + + evm.db_mut().commit(state); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. + #[allow(clippy::needless_update)] // side-effect of optimism fields + receipts.push(Receipt { + tx_type: transaction.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.logs().iter().cloned().map(Into::into).collect(), + ..Default::default() + }); + + // append transaction to the list of executed transactions + executed_txs.push(transaction); + results.push(result); + } + + evm.db_mut().merge_transitions(BundleRetention::Reverts); + } + + Ok((executed_txs, receipts, results)) +} + +#[cfg(test)] +mod tests { + use std::time::{SystemTime, UNIX_EPOCH}; + + use alloy_consensus::{SidecarBuilder, SimpleCoder}; + use alloy_sol_types::{sol, SolCall}; + use reth::transaction_pool::{ + test_utils::{testing_pool, MockTransaction}, + TransactionOrigin, TransactionPool, + }; + use reth_interfaces::test_utils::generators::{self, sign_tx_with_key_pair}; + use reth_primitives::{ + bytes, + constants::ETH_TO_WEI, + keccak256, public_key_to_address, + revm_primitives::{AccountInfo, ExecutionResult, Output, TransactTo, TxEnv}, + BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, + }; + use reth_revm::Evm; + use rusqlite::Connection; + use secp256k1::{Keypair, Secp256k1}; + + use crate::{ + db::Database, execute_block, 
RollupContract::BlockHeader, CHAIN_ID, + ROLLUP_SUBMITTER_ADDRESS, + }; + + sol!( + WETH, + r#" +[ + { + "constant":true, + "inputs":[ + { + "name":"", + "type":"address" + } + ], + "name":"balanceOf", + "outputs":[ + { + "name":"", + "type":"uint256" + } + ], + "payable":false, + "stateMutability":"view", + "type":"function" + } +] + "# + ); + + #[tokio::test] + async fn test_execute_block() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let mut database = Database::new(Connection::open_in_memory()?)?; + + // Create key pair + let secp = Secp256k1::new(); + let key_pair = Keypair::new(&secp, &mut generators::rng()); + let sender_address = public_key_to_address(key_pair.public_key()); + + // Deposit some ETH to the sender and insert it into database + database.upsert_account(sender_address, |_| { + Ok(AccountInfo { balance: U256::from(ETH_TO_WEI), nonce: 1, ..Default::default() }) + })?; + + // WETH deployment transaction sent using calldata + let (_, _, results) = execute_transaction( + &mut database, + key_pair, + 0, + Transaction::Eip2930(TxEip2930 { + chain_id: CHAIN_ID, + nonce: 1, + gas_limit: 1_500_000, + gas_price: 1_500_000_000, + to: TxKind::Create, + // WETH9 bytecode + input: 
bytes!("60606040526040805190810160405280600d81526020017f57726170706564204574686572000000000000000000000000000000000000008152506000908051906020019061004f9291906100c8565b506040805190810160405280600481526020017f57455448000000000000000000000000000000000000000000000000000000008152506001908051906020019061009b9291906100c8565b506012600260006101000a81548160ff021916908360ff16021790555034156100c357600080fd5b61016d565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061010957805160ff1916838001178555610137565b82800160010185558215610137579182015b8281111561013657825182559160200191906001019061011b565b5b5090506101449190610148565b5090565b61016a91905b8082111561016657600081600090555060010161014e565b5090565b90565b610c348061017c6000396000f3006060604052600436106100af576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100b9578063095ea7b31461014757806318160ddd146101a157806323b872dd146101ca5780632e1a7d4d14610243578063313ce5671461026657806370a082311461029557806395d89b41146102e2578063a9059cbb14610370578063d0e30db0146103ca578063dd62ed3e146103d4575b6100b7610440565b005b34156100c457600080fd5b6100cc6104dd565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561010c5780820151818401526020810190506100f1565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610187600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061057b565b604051808215151515815260200191505060405180910390f35b34156101ac57600080fd5b6101b461066d565b6040518082815260200191505060405180910390f35b34156101d557600080fd5b610229600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061068c565b604051808215151515815260200191505060405180910390f35b341561024e57600080fd5b61026460048080359060200190919050506109d9565b005b34
1561027157600080fd5b610279610b05565b604051808260ff1660ff16815260200191505060405180910390f35b34156102a057600080fd5b6102cc600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610b18565b6040518082815260200191505060405180910390f35b34156102ed57600080fd5b6102f5610b30565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561033557808201518184015260208101905061031a565b50505050905090810190601f1680156103625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037b57600080fd5b6103b0600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610bce565b604051808215151515815260200191505060405180910390f35b6103d2610440565b005b34156103df57600080fd5b61042a600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610be3565b6040518082815260200191505060405180910390f35b34600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055503373ffffffffffffffffffffffffffffffffffffffff167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c346040518082815260200191505060405180910390a2565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105735780601f1061054857610100808354040283529160200191610573565b820191906000526020600020905b81548152906001019060200180831161055657829003601f168201915b505050505081565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b9258460
40518082815260200191505060405180910390a36001905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1631905090565b600081600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156106dc57600080fd5b3373ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141580156107b457507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b156108cf5781600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561084457600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055505b81600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610a2757600080fd5b80600360003373ffffffffffffffffff
ffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f193505050501515610ab457600080fd5b3373ffffffffffffffffffffffffffffffffffffffff167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65826040518082815260200191505060405180910390a250565b600260009054906101000a900460ff1681565b60036020528060005260406000206000915090505481565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610bc65780601f10610b9b57610100808354040283529160200191610bc6565b820191906000526020600020905b815481529060010190602001808311610ba957829003601f168201915b505050505081565b6000610bdb33848461068c565b905092915050565b60046020528160005260406000206020528060005260406000206000915091505054815600a165627a7a72305820deb4c2ccab3c2fdca32ab3f46728389c2fe2c165d5fafa07661e4e004f6c344a0029"), + ..Default::default() + }), + BlockDataSource::Calldata + ).await?; + + let weth_address = match results.first() { + Some(ExecutionResult::Success { output: Output::Create(_, Some(address)), .. 
}) => { + *address + } + _ => eyre::bail!("WETH contract address not found"), + }; + + // WETH deposit transaction sent using blobs + execute_transaction( + &mut database, + key_pair, + 1, + Transaction::Eip2930(TxEip2930 { + chain_id: CHAIN_ID, + nonce: 2, + gas_limit: 50000, + gas_price: 1_500_000_000, + to: TxKind::Call(weth_address), + value: U256::from(0.5 * ETH_TO_WEI as f64), + input: bytes!("d0e30db0"), + ..Default::default() + }), + BlockDataSource::Blobs, + ) + .await?; + + // Verify WETH balance + let mut evm = Evm::builder() + .with_db(&mut database) + .with_tx_env(TxEnv { + caller: sender_address, + gas_limit: 50_000_000, + transact_to: TransactTo::Call(weth_address), + data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), + ..Default::default() + }) + .build(); + let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; + assert_eq!( + result.output(), + Some(&U256::from(0.5 * ETH_TO_WEI as f64).to_be_bytes_vec().into()) + ); + drop(evm); + + // Verify nonce + let account = database.get_account(sender_address)?.unwrap(); + assert_eq!(account.nonce, 3); + + // Revert block with WETH deposit transaction + database.revert_tip_block(U256::from(1))?; + + // Verify WETH balance after revert + let mut evm = Evm::builder() + .with_db(&mut database) + .with_tx_env(TxEnv { + caller: sender_address, + gas_limit: 50_000_000, + transact_to: TransactTo::Call(weth_address), + data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), + ..Default::default() + }) + .build(); + let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; + assert_eq!(result.output(), Some(&U256::ZERO.to_be_bytes_vec().into())); + drop(evm); + + // Verify nonce after revert + let account = database.get_account(sender_address)?.unwrap(); + assert_eq!(account.nonce, 2); + + Ok(()) + } + + enum BlockDataSource { + Calldata, + Blobs, + } + + async fn execute_transaction( + database: &mut Database, + key_pair: Keypair, + sequence: BlockNumber, 
+ tx: Transaction, + block_data_source: BlockDataSource, + ) -> eyre::Result<(SealedBlockWithSenders, Vec, Vec)> { + // Construct block header + let block_header = BlockHeader { + rollupChainId: U256::from(CHAIN_ID), + sequence: U256::from(sequence), + confirmBy: U256::from(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()), + gasLimit: U256::from(30_000_000), + rewardAddress: ROLLUP_SUBMITTER_ADDRESS, + }; + let encoded_transactions = + alloy_rlp::encode(vec![sign_tx_with_key_pair(key_pair, tx).envelope_encoded()]); + let block_data_hash = keccak256(&encoded_transactions); + + let pool = testing_pool(); + + let (block_data, l1_transaction) = match block_data_source { + BlockDataSource::Calldata => ( + encoded_transactions, + sign_tx_with_key_pair(key_pair, Transaction::Eip2930(TxEip2930::default())), + ), + BlockDataSource::Blobs => { + let sidecar = + SidecarBuilder::::from_slice(&encoded_transactions).build()?; + let blob_hashes = alloy_rlp::encode(sidecar.versioned_hashes().collect::>()); + + let mut mock_transaction = MockTransaction::eip4844_with_sidecar(sidecar.into()); + let transaction = + sign_tx_with_key_pair(key_pair, Transaction::from(mock_transaction.clone())); + mock_transaction.set_hash(transaction.hash); + pool.add_transaction(TransactionOrigin::Local, mock_transaction).await?; + (blob_hashes, transaction) + } + }; + + // Execute block and insert into database + let (block, bundle, receipts, results) = execute_block( + database, + &pool, + &l1_transaction, + &block_header, + block_data.into(), + block_data_hash, + ) + .await?; + let block = block.seal_slow(); + database.insert_block_with_bundle(&block, bundle)?; + + Ok((block, receipts, results)) + } +} diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs index db33aaf72..f3e7f00bc 100644 --- a/examples/exex/rollup/src/main.rs +++ b/examples/exex/rollup/src/main.rs @@ -2,41 +2,33 @@ //! processing deposits and storing all related data in an SQLite database. //! //! 
The rollup contract accepts blocks of transactions and deposits of ETH and is deployed on -//! Holesky at [ROLLUP_CONTRACT_ADDRESS], see . +//! Holesky at [ROLLUP_CONTRACT_ADDRESS], see . -use alloy_rlp::Decodable; use alloy_sol_types::{sol, SolEventInterface, SolInterface}; use db::Database; -use eyre::OptionExt; +use execution::execute_block; use once_cell::sync::Lazy; use reth_exex::{ExExContext, ExExEvent}; -use reth_interfaces::executor::BlockValidationError; -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeComponents}; -use reth_node_ethereum::{EthEvmConfig, EthereumNode}; +use reth_node_api::FullNodeComponents; +use reth_node_ethereum::EthereumNode; use reth_primitives::{ - address, constants, - revm::env::fill_tx_env, - revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState}, - Address, Block, BlockWithSenders, Bytes, ChainSpec, ChainSpecBuilder, Genesis, Hardfork, - Header, Receipt, SealedBlockWithSenders, TransactionSigned, U256, + address, Address, ChainSpec, ChainSpecBuilder, Genesis, SealedBlockWithSenders, + TransactionSigned, U256, }; use reth_provider::Chain; -use reth_revm::{ - db::{states::bundle_state::BundleRetention, BundleState}, - DatabaseCommit, StateBuilder, -}; -use reth_tracing::tracing::{debug, error, info}; +use reth_tracing::tracing::{error, info}; use rusqlite::Connection; use std::sync::Arc; mod db; +mod execution; sol!(RollupContract, "rollup_abi.json"); use RollupContract::{RollupContractCalls, RollupContractEvents}; const DATABASE_PATH: &str = "rollup.db"; -const ROLLUP_CONTRACT_ADDRESS: Address = address!("74ae65DF20cB0e3BF8c022051d0Cdd79cc60890C"); -const ROLLUP_SUBMITTER_ADDRESS: Address = address!("B01042Db06b04d3677564222010DF5Bd09C5A947"); +const ROLLUP_CONTRACT_ADDRESS: Address = address!("97C0E40c6B5bb5d4fa3e2AA1C6b8bC7EA5ECAe31"); +const ROLLUP_SUBMITTER_ADDRESS: Address = address!("5b0517Dc94c413a5871536872605522E54C85a03"); const CHAIN_ID: u64 = 17001; static CHAIN_SPEC: Lazy> = 
Lazy::new(|| { Arc::new( @@ -67,7 +59,7 @@ impl Rollup { } if let Some(committed_chain) = notification.committed_chain() { - self.commit(&committed_chain)?; + self.commit(&committed_chain).await?; self.ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; } } @@ -79,7 +71,7 @@ impl Rollup { /// /// This function decodes all transactions to the rollup contract into events, executes the /// corresponding actions and inserts the results into the database. - fn commit(&mut self, chain: &Chain) -> eyre::Result<()> { + async fn commit(&mut self, chain: &Chain) -> eyre::Result<()> { let events = decode_chain_into_rollup_events(chain); for (_, tx, event) in events { @@ -87,7 +79,10 @@ impl Rollup { // A new block is submitted to the rollup contract. // The block is executed on top of existing rollup state and committed into the // database. - RollupContractEvents::BlockSubmitted(_) => { + RollupContractEvents::BlockSubmitted(RollupContract::BlockSubmitted { + blockDataHash, + .. + }) => { let call = RollupContractCalls::abi_decode(tx.input(), true)?; if let RollupContractCalls::submitBlock(RollupContract::submitBlockCall { @@ -96,12 +91,21 @@ impl Rollup { .. }) = call { - match execute_block(&mut self.db, &header, blockData) { + match execute_block( + &mut self.db, + self.ctx.pool(), + tx, + &header, + blockData, + blockDataHash, + ) + .await + { Ok((block, bundle, _, _)) => { let block = block.seal_slow(); self.db.insert_block_with_bundle(&block, bundle)?; info!( - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), chain_id = %header.rollupChainId, sequence = %header.sequence, transactions = block.body.len(), @@ -111,7 +115,7 @@ impl Rollup { Err(err) => { error!( %err, - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), chain_id = %header.rollupChainId, sequence = %header.sequence, "Failed to execute block" @@ -123,12 +127,17 @@ impl Rollup { // A deposit of ETH to the rollup contract. 
The deposit is added to the recipient's // balance and committed into the database. RollupContractEvents::Enter(RollupContract::Enter { + rollupChainId, token, rollupRecipient, amount, }) => { + if rollupChainId != U256::from(CHAIN_ID) { + error!(tx_hash = %tx.recalculate_hash(), "Invalid rollup chain ID"); + continue + } if token != Address::ZERO { - error!(tx_hash = %tx.hash, "Only ETH deposits are supported"); + error!(tx_hash = %tx.recalculate_hash(), "Only ETH deposits are supported"); continue } @@ -139,7 +148,7 @@ impl Rollup { })?; info!( - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), %amount, recipient = %rollupRecipient, "Deposit", @@ -174,7 +183,7 @@ impl Rollup { { self.db.revert_tip_block(header.sequence)?; info!( - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), chain_id = %header.rollupChainId, sequence = %header.sequence, "Block reverted" @@ -183,12 +192,17 @@ impl Rollup { } // The deposit is subtracted from the recipient's balance. RollupContractEvents::Enter(RollupContract::Enter { + rollupChainId, token, rollupRecipient, amount, }) => { + if rollupChainId != U256::from(CHAIN_ID) { + error!(tx_hash = %tx.recalculate_hash(), "Invalid rollup chain ID"); + continue + } if token != Address::ZERO { - error!(tx_hash = %tx.hash, "Only ETH deposits are supported"); + error!(tx_hash = %tx.recalculate_hash(), "Only ETH deposits are supported"); continue } @@ -199,7 +213,7 @@ impl Rollup { })?; info!( - tx_hash = %tx.hash, + tx_hash = %tx.recalculate_hash(), %amount, recipient = %rollupRecipient, "Deposit reverted", @@ -242,153 +256,6 @@ fn decode_chain_into_rollup_events( .collect() } -/// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle -/// state)[BundleState] and list of (receipts)[Receipt]. 
-fn execute_block( - db: &mut Database, - header: &RollupContract::BlockHeader, - block_data: Bytes, -) -> eyre::Result<(BlockWithSenders, BundleState, Vec, Vec)> { - if header.rollupChainId != U256::from(CHAIN_ID) { - eyre::bail!("Invalid rollup chain ID") - } - - let block_number = u64::try_from(header.sequence)?; - let parent_block = if !header.sequence.is_zero() { - db.get_block(header.sequence - U256::from(1))? - } else { - None - }; - - // Calculate base fee per gas for EIP-1559 transactions - let base_fee_per_gas = if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) { - constants::EIP1559_INITIAL_BASE_FEE - } else { - parent_block - .as_ref() - .ok_or(eyre::eyre!("parent block not found"))? - .header - .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number)) - .ok_or(eyre::eyre!("failed to calculate base fee"))? - }; - - // Construct header - let header = Header { - parent_hash: parent_block.map(|block| block.header.hash()).unwrap_or_default(), - number: block_number, - gas_limit: u64::try_from(header.gasLimit)?, - timestamp: u64::try_from(header.confirmBy)?, - base_fee_per_gas: Some(base_fee_per_gas), - ..Default::default() - }; - - // Decode block data, filter only transactions with the correct chain ID and recover senders - let transactions = Vec::::decode(&mut block_data.as_ref())? - .into_iter() - .filter(|tx| tx.chain_id() == Some(CHAIN_ID)) - .map(|tx| { - let sender = tx.recover_signer().ok_or(eyre::eyre!("failed to recover signer"))?; - Ok((tx, sender)) - }) - .collect::>>()?; - - // Execute block - let state = StateBuilder::new_with_database( - Box::new(db) as Box + Send> - ) - .with_bundle_update() - .build(); - let evm_config = EthEvmConfig::default(); - let mut evm = evm_config.evm(state); - - // Set state clear flag. 
- evm.db_mut().set_state_clear_flag( - CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number), - ); - - let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id()); - EthEvmConfig::fill_cfg_and_block_env( - &mut cfg, - evm.block_mut(), - &CHAIN_SPEC, - &header, - U256::ZERO, - ); - *evm.cfg_mut() = cfg.cfg_env; - - let mut receipts = Vec::with_capacity(transactions.len()); - let mut executed_txs = Vec::with_capacity(transactions.len()); - let mut results = Vec::with_capacity(transactions.len()); - if !transactions.is_empty() { - let mut cumulative_gas_used = 0; - for (transaction, sender) in transactions { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas { - // TODO(alexey): what to do here? - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - // Execute transaction. - // Fill revm structure. - fill_tx_env(evm.tx_mut(), &transaction, sender); - - let ResultAndState { result, state } = match evm.transact() { - Ok(result) => result, - Err(err) => { - match err { - EVMError::Transaction(err) => { - // if the transaction is invalid, we can skip it - debug!(%err, ?transaction, "Skipping invalid transaction"); - continue - } - err => { - // this is an error that we should treat as fatal for this attempt - eyre::bail!(err) - } - } - } - }; - - debug!(?transaction, ?result, ?state, "Executed transaction"); - - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. 
- #[allow(clippy::needless_update)] // side-effect of optimism fields - receipts.push(Receipt { - tx_type: transaction.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.logs().iter().cloned().map(Into::into).collect(), - ..Default::default() - }); - - // append transaction to the list of executed transactions - executed_txs.push(transaction); - results.push(result); - } - - evm.db_mut().merge_transitions(BundleRetention::Reverts); - } - - // Construct block and recover senders - let block = Block { header, body: executed_txs, ..Default::default() } - .with_recovered_senders() - .ok_or_eyre("failed to recover senders")?; - - let bundle = evm.db_mut().take_bundle(); - - Ok((block, bundle, receipts, results)) -} - fn main() -> eyre::Result<()> { reth::cli::Cli::parse_args().run(|builder, _| async move { let handle = builder @@ -404,184 +271,3 @@ fn main() -> eyre::Result<()> { handle.wait_for_node_exit().await }) } - -#[cfg(test)] -mod tests { - use std::time::{SystemTime, UNIX_EPOCH}; - - use alloy_sol_types::{sol, SolCall}; - use reth_interfaces::test_utils::generators::{self, sign_tx_with_key_pair}; - use reth_primitives::{ - bytes, - constants::ETH_TO_WEI, - public_key_to_address, - revm_primitives::{AccountInfo, ExecutionResult, Output, TransactTo, TxEnv}, - BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, - }; - use reth_revm::Evm; - use rusqlite::Connection; - use secp256k1::{Keypair, Secp256k1}; - - use crate::{ - db::Database, execute_block, RollupContract::BlockHeader, CHAIN_ID, - ROLLUP_SUBMITTER_ADDRESS, - }; - - sol!( - WETH, - r#" -[ - { - "constant":true, - "inputs":[ - { - "name":"", - "type":"address" - } - ], - "name":"balanceOf", - "outputs":[ - { - "name":"", - "type":"uint256" - } - ], - "payable":false, - "stateMutability":"view", - "type":"function" - } -] - "# - ); - - #[test] - fn test_execute_block() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - - let mut 
database = Database::new(Connection::open_in_memory()?)?; - - // Create key pair - let secp = Secp256k1::new(); - let key_pair = Keypair::new(&secp, &mut generators::rng()); - let sender_address = public_key_to_address(key_pair.public_key()); - - // Deposit some ETH to the sender and insert it into database - database.upsert_account(sender_address, |_| { - Ok(AccountInfo { balance: U256::from(ETH_TO_WEI), nonce: 1, ..Default::default() }) - })?; - - // WETH deployment transaction - let (_, _, results) = execute_transaction( - &mut database, - key_pair, - 0, - Transaction::Eip2930(TxEip2930 { - chain_id: CHAIN_ID, - nonce: 1, - gas_limit: 1_500_000, - gas_price: 1_500_000_000, - to: TxKind::Create, - // WETH9 bytecode - input: bytes!("60606040526040805190810160405280600d81526020017f57726170706564204574686572000000000000000000000000000000000000008152506000908051906020019061004f9291906100c8565b506040805190810160405280600481526020017f57455448000000000000000000000000000000000000000000000000000000008152506001908051906020019061009b9291906100c8565b506012600260006101000a81548160ff021916908360ff16021790555034156100c357600080fd5b61016d565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061010957805160ff1916838001178555610137565b82800160010185558215610137579182015b8281111561013657825182559160200191906001019061011b565b5b5090506101449190610148565b5090565b61016a91905b8082111561016657600081600090555060010161014e565b5090565b90565b610c348061017c6000396000f3006060604052600436106100af576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100b9578063095ea7b31461014757806318160ddd146101a157806323b872dd146101ca5780632e1a7d4d14610243578063313ce5671461026657806370a082311461029557806395d89b41146102e2578063a9059cbb14610370578063d0e30db0146103ca578063dd62ed3e146103d4575b6100b7610440565b005b34156100c457600080fd5b6100cc6104dd565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101
0c5780820151818401526020810190506100f1565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610187600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061057b565b604051808215151515815260200191505060405180910390f35b34156101ac57600080fd5b6101b461066d565b6040518082815260200191505060405180910390f35b34156101d557600080fd5b610229600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061068c565b604051808215151515815260200191505060405180910390f35b341561024e57600080fd5b61026460048080359060200190919050506109d9565b005b341561027157600080fd5b610279610b05565b604051808260ff1660ff16815260200191505060405180910390f35b34156102a057600080fd5b6102cc600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610b18565b6040518082815260200191505060405180910390f35b34156102ed57600080fd5b6102f5610b30565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561033557808201518184015260208101905061031a565b50505050905090810190601f1680156103625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037b57600080fd5b6103b0600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610bce565b604051808215151515815260200191505060405180910390f35b6103d2610440565b005b34156103df57600080fd5b61042a600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610be3565b6040518082815260200191505060405180910390f35b34600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055503373ffffffffffffffffffffffffffffffffffffffff167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c346040518082815260200191505060405180910390a2565b6000
8054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105735780601f1061054857610100808354040283529160200191610573565b820191906000526020600020905b81548152906001019060200180831161055657829003601f168201915b505050505081565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1631905090565b600081600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156106dc57600080fd5b3373ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141580156107b457507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b156108cf5781600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561084457600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403
925050819055505b81600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610a2757600080fd5b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f193505050501515610ab457600080fd5b3373ffffffffffffffffffffffffffffffffffffffff167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65826040518082815260200191505060405180910390a250565b600260009054906101000a900460ff1681565b60036020528060005260406000206000915090505481565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610bc65780601f10610b9b57610100808354040283529160200191610bc6565b820191906000526020600020905b815481529060010190602001808311610ba957829003601f168201915b505050505081565b6000610bdb33848461068c565b905092915050565b60046020528160005260406000206020528060005260406000206000915091505054815600a165627a7a72305820deb4c2ccab3c2fdca32ab3f46728389c2fe2c165d5fafa07661e4e004f6c344a0029"), - ..Default::default() - }) - )?; - - let weth_address = match results.first() { - Some(ExecutionResult::Success { output: Output::Create(_, Some(address)), .. 
}) => { - *address - } - _ => eyre::bail!("WETH contract address not found"), - }; - - // WETH deposit transaction - execute_transaction( - &mut database, - key_pair, - 1, - Transaction::Eip2930(TxEip2930 { - chain_id: CHAIN_ID, - nonce: 2, - gas_limit: 50000, - gas_price: 1_500_000_000, - to: TxKind::Call(weth_address), - value: U256::from(0.5 * ETH_TO_WEI as f64), - input: bytes!("d0e30db0"), - ..Default::default() - }), - )?; - - // Verify WETH balance - let mut evm = Evm::builder() - .with_db(&mut database) - .with_tx_env(TxEnv { - caller: sender_address, - gas_limit: 50_000_000, - transact_to: TransactTo::Call(weth_address), - data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), - ..Default::default() - }) - .build(); - let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; - assert_eq!( - result.output(), - Some(&U256::from(0.5 * ETH_TO_WEI as f64).to_be_bytes_vec().into()) - ); - drop(evm); - - // Verify nonce - let account = database.get_account(sender_address)?.unwrap(); - assert_eq!(account.nonce, 3); - - // Revert block with WETH deposit transaction - database.revert_tip_block(U256::from(1))?; - - // Verify WETH balance after revert - let mut evm = Evm::builder() - .with_db(&mut database) - .with_tx_env(TxEnv { - caller: sender_address, - gas_limit: 50_000_000, - transact_to: TransactTo::Call(weth_address), - data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(), - ..Default::default() - }) - .build(); - let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result; - assert_eq!(result.output(), Some(&U256::ZERO.to_be_bytes_vec().into())); - drop(evm); - - // Verify nonce after revert - let account = database.get_account(sender_address)?.unwrap(); - assert_eq!(account.nonce, 2); - - Ok(()) - } - - fn execute_transaction( - database: &mut Database, - key_pair: Keypair, - sequence: BlockNumber, - tx: Transaction, - ) -> eyre::Result<(SealedBlockWithSenders, Vec, Vec)> { - let signed_tx = 
sign_tx_with_key_pair(key_pair, tx); - - // Construct block header and data - let block_header = BlockHeader { - rollupChainId: U256::from(CHAIN_ID), - sequence: U256::from(sequence), - confirmBy: U256::from(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()), - gasLimit: U256::from(30_000_000), - rewardAddress: ROLLUP_SUBMITTER_ADDRESS, - }; - let block_data = alloy_rlp::encode(vec![signed_tx.envelope_encoded()]); - - // Execute block and insert into database - let (block, bundle, receipts, results) = - execute_block(database, &block_header, block_data.into())?; - let block = block.seal_slow(); - database.insert_block_with_bundle(&block, bundle)?; - - Ok((block, receipts, results)) - } -} From a590ed7ce5af3bbf6feb3cd5a626334311e341d7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 2 May 2024 22:34:13 +0200 Subject: [PATCH 180/250] chore(discv5): pub methods (#8057) --- crates/net/discv5/src/config.rs | 2 +- crates/net/discv5/src/filter.rs | 22 +- crates/net/discv5/src/lib.rs | 343 ++++++++++++++++---------------- 3 files changed, 182 insertions(+), 185 deletions(-) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 3a506902e..05c2863c8 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -143,7 +143,7 @@ impl ConfigBuilder { } /// Sets the tcp port to advertise in the local [`Enr`](discv5::enr::Enr). - fn tcp_port(mut self, port: u16) -> Self { + pub fn tcp_port(mut self, port: u16) -> Self { self.tcp_port = port; self } diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index 2e20e2fbd..d62a7584a 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -35,14 +35,12 @@ impl MustIncludeKey { /// Returns [`FilterOutcome::Ok`] if [`Enr`](discv5::Enr) contains the configured kv-pair key. 
pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome { if enr.get_raw_rlp(self.key).is_none() { - return FilterOutcome::Ignore { reason: self.ignore_reason() } + return FilterOutcome::Ignore { + reason: format!("{} fork required", String::from_utf8_lossy(self.key)), + } } FilterOutcome::Ok } - - fn ignore_reason(&self) -> String { - format!("{} fork required", String::from_utf8_lossy(self.key)) - } } /// Filter requiring that peers not advertise kv-pairs using certain keys, e.g. b"eth2". @@ -69,20 +67,18 @@ impl MustNotIncludeKeys { pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome { for key in self.keys.iter() { if matches!(key.filter(enr), FilterOutcome::Ok) { - return FilterOutcome::Ignore { reason: self.ignore_reason() } + return FilterOutcome::Ignore { + reason: format!( + "{} forks not allowed", + self.keys.iter().map(|key| String::from_utf8_lossy(key.key)).format(",") + ), + } } } FilterOutcome::Ok } - fn ignore_reason(&self) -> String { - format!( - "{} forks not allowed", - self.keys.iter().map(|key| String::from_utf8_lossy(key.key)).format(",") - ) - } - /// Adds a key that must not be present for any kv-pair in a node record. pub fn add_disallowed_keys(&mut self, keys: &[&'static [u8]]) { for key in keys { diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index b8b2eab24..8895f8a16 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -161,7 +161,7 @@ impl Discv5 { // // 1. make local enr from listen config // - let (enr, bc_enr, fork_key, ip_mode) = Self::build_local_enr(sk, &discv5_config); + let (enr, bc_enr, fork_key, ip_mode) = build_local_enr(sk, &discv5_config); trace!(target: "net::discv5", ?enr, @@ -197,14 +197,14 @@ impl Discv5 { // // 3. add boot nodes // - Self::bootstrap(bootstrap_nodes, &discv5).await?; + bootstrap(bootstrap_nodes, &discv5).await?; let metrics = Discv5Metrics::default(); // // 4. 
start bg kbuckets maintenance // - Self::spawn_populate_kbuckets_bg( + spawn_populate_kbuckets_bg( lookup_interval, bootstrap_lookup_interval, bootstrap_lookup_countdown, @@ -219,169 +219,6 @@ impl Discv5 { )) } - fn build_local_enr( - sk: &SecretKey, - config: &Config, - ) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { - let mut builder = discv5::enr::Enr::builder(); - - let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. } = config; - - let (ip_mode, socket) = match discv5_config.listen_config { - ListenConfig::Ipv4 { ip, port } => { - if ip != Ipv4Addr::UNSPECIFIED { - builder.ip4(ip); - } - builder.udp4(port); - builder.tcp4(*tcp_port); - - (IpMode::Ip4, (ip, port).into()) - } - ListenConfig::Ipv6 { ip, port } => { - if ip != Ipv6Addr::UNSPECIFIED { - builder.ip6(ip); - } - builder.udp6(port); - builder.tcp6(*tcp_port); - - (IpMode::Ip6, (ip, port).into()) - } - ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { - if ipv4 != Ipv4Addr::UNSPECIFIED { - builder.ip4(ipv4); - } - builder.udp4(ipv4_port); - builder.tcp4(*tcp_port); - - if ipv6 != Ipv6Addr::UNSPECIFIED { - builder.ip6(ipv6); - } - builder.udp6(ipv6_port); - - (IpMode::DualStack, (ipv6, ipv6_port).into()) - } - }; - - // identifies which network node is on - let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { - builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); - *network_stack_id - }); - - // add other data - for (key, value) in other_enr_kv_pairs { - builder.add_value_rlp(key, value.clone().into()); - } - - // enr v4 not to get confused with discv4, independent versioning enr and - // discovery - let enr = builder.build(sk).expect("should build enr v4"); - - // backwards compatible enr - let bc_enr = NodeRecord::from_secret_key(socket, sk); - - (enr, bc_enr, network_stack_id, ip_mode) - } - - /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. 
- async fn bootstrap( - bootstrap_nodes: HashSet, - discv5: &Arc, - ) -> Result<(), Error> { - trace!(target: "net::discv5", - ?bootstrap_nodes, - "adding bootstrap nodes .." - ); - - let mut enr_requests = vec![]; - for node in bootstrap_nodes { - match node { - BootNode::Enr(node) => { - if let Err(err) = discv5.add_enr(node) { - return Err(Error::AddNodeFailed(err)) - } - } - BootNode::Enode(enode) => { - let discv5 = discv5.clone(); - enr_requests.push(async move { - if let Err(err) = discv5.request_enr(enode.to_string()).await { - debug!(target: "net::discv5", - ?enode, - %err, - "failed adding boot node" - ); - } - }) - } - } - } - - // If a session is established, the ENR is added straight away to discv5 kbuckets - Ok(_ = join_all(enr_requests).await) - } - - /// Backgrounds regular look up queries, in order to keep kbuckets populated. - fn spawn_populate_kbuckets_bg( - lookup_interval: u64, - bootstrap_lookup_interval: u64, - bootstrap_lookup_countdown: u64, - metrics: Discv5Metrics, - discv5: Arc, - ) { - task::spawn({ - let local_node_id = discv5.local_enr().node_id(); - let lookup_interval = Duration::from_secs(lookup_interval); - let metrics = metrics.discovered_peers; - let mut kbucket_index = MAX_KBUCKET_INDEX; - let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); - // todo: graceful shutdown - - async move { - // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest - // log2distance from local node - for i in (0..bootstrap_lookup_countdown).rev() { - let target = discv5::enr::NodeId::random(); - - trace!(target: "net::discv5", - %target, - bootstrap_boost_runs_countdown=i, - lookup_interval=format!("{:#?}", pulse_lookup_interval), - "starting bootstrap boost lookup query" - ); - - lookup(target, &discv5, &metrics).await; - - tokio::time::sleep(pulse_lookup_interval).await; - } - - // initiate regular lookups to populate kbuckets - loop { - // make sure node is connected to each subtree in the network 
by target - // selection (ref kademlia) - let target = get_lookup_target(kbucket_index, local_node_id); - - trace!(target: "net::discv5", - %target, - lookup_interval=format!("{:#?}", lookup_interval), - "starting periodic lookup query" - ); - - lookup(target, &discv5, &metrics).await; - - if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { - // try to populate bucket one step closer - kbucket_index -= 1 - } else { - // start over with bucket furthest away - kbucket_index = MAX_KBUCKET_INDEX - } - - tokio::time::sleep(lookup_interval).await; - } - } - }); - } - /// Process an event from the underlying [`discv5::Discv5`] node. pub fn on_discv5_update(&mut self, update: discv5::Event) -> Option { match update { @@ -416,7 +253,7 @@ impl Discv5 { } /// Processes a discovered peer. Returns `true` if peer is added to - fn on_discovered_peer( + pub fn on_discovered_peer( &mut self, enr: &discv5::Enr, socket: SocketAddr, @@ -467,7 +304,7 @@ impl Discv5 { /// /// Note: [`discv5::Discv5`] won't initiate a session with any peer with a malformed node /// record, that advertises a reserved IP address on a WAN network. - fn try_into_reachable( + pub fn try_into_reachable( &self, enr: &discv5::Enr, socket: SocketAddr, @@ -490,13 +327,13 @@ impl Discv5 { /// Applies filtering rules on an ENR. Returns [`Ok`](FilterOutcome::Ok) if peer should be /// passed up to app, and [`Ignore`](FilterOutcome::Ignore) if peer should instead be dropped. - fn filter_discovered_peer(&self, enr: &discv5::Enr) -> FilterOutcome { + pub fn filter_discovered_peer(&self, enr: &discv5::Enr) -> FilterOutcome { self.discovered_peer_filter.filter(enr) } /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network /// stack, if field is set. - fn get_fork_id( + pub fn get_fork_id( &self, enr: &discv5::enr::Enr, ) -> Result { @@ -551,6 +388,170 @@ pub struct DiscoveredPeer { pub fork_id: Option, } +/// Builds the local ENR with the supplied key. 
+pub fn build_local_enr( + sk: &SecretKey, + config: &Config, +) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { + let mut builder = discv5::enr::Enr::builder(); + + let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. } = config; + + let (ip_mode, socket) = match discv5_config.listen_config { + ListenConfig::Ipv4 { ip, port } => { + if ip != Ipv4Addr::UNSPECIFIED { + builder.ip4(ip); + } + builder.udp4(port); + builder.tcp4(*tcp_port); + + (IpMode::Ip4, (ip, port).into()) + } + ListenConfig::Ipv6 { ip, port } => { + if ip != Ipv6Addr::UNSPECIFIED { + builder.ip6(ip); + } + builder.udp6(port); + builder.tcp6(*tcp_port); + + (IpMode::Ip6, (ip, port).into()) + } + ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { + if ipv4 != Ipv4Addr::UNSPECIFIED { + builder.ip4(ipv4); + } + builder.udp4(ipv4_port); + builder.tcp4(*tcp_port); + + if ipv6 != Ipv6Addr::UNSPECIFIED { + builder.ip6(ipv6); + } + builder.udp6(ipv6_port); + + (IpMode::DualStack, (ipv6, ipv6_port).into()) + } + }; + + // identifies which network node is on + let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { + builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); + *network_stack_id + }); + + // add other data + for (key, value) in other_enr_kv_pairs { + builder.add_value_rlp(key, value.clone().into()); + } + + // enr v4 not to get confused with discv4, independent versioning enr and + // discovery + let enr = builder.build(sk).expect("should build enr v4"); + + // backwards compatible enr + let bc_enr = NodeRecord::from_secret_key(socket, sk); + + (enr, bc_enr, network_stack_id, ip_mode) +} + +/// Bootstraps underlying [`discv5::Discv5`] node with configured peers. +pub async fn bootstrap( + bootstrap_nodes: HashSet, + discv5: &Arc, +) -> Result<(), Error> { + trace!(target: "net::discv5", + ?bootstrap_nodes, + "adding bootstrap nodes .." 
+ ); + + let mut enr_requests = vec![]; + for node in bootstrap_nodes { + match node { + BootNode::Enr(node) => { + if let Err(err) = discv5.add_enr(node) { + return Err(Error::AddNodeFailed(err)) + } + } + BootNode::Enode(enode) => { + let discv5 = discv5.clone(); + enr_requests.push(async move { + if let Err(err) = discv5.request_enr(enode.to_string()).await { + debug!(target: "net::discv5", + ?enode, + %err, + "failed adding boot node" + ); + } + }) + } + } + } + + // If a session is established, the ENR is added straight away to discv5 kbuckets + Ok(_ = join_all(enr_requests).await) +} + +/// Backgrounds regular look up queries, in order to keep kbuckets populated. +pub fn spawn_populate_kbuckets_bg( + lookup_interval: u64, + bootstrap_lookup_interval: u64, + bootstrap_lookup_countdown: u64, + metrics: Discv5Metrics, + discv5: Arc, +) { + task::spawn({ + let local_node_id = discv5.local_enr().node_id(); + let lookup_interval = Duration::from_secs(lookup_interval); + let metrics = metrics.discovered_peers; + let mut kbucket_index = MAX_KBUCKET_INDEX; + let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); + // todo: graceful shutdown + + async move { + // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest + // log2distance from local node + for i in (0..bootstrap_lookup_countdown).rev() { + let target = discv5::enr::NodeId::random(); + + trace!(target: "net::discv5", + %target, + bootstrap_boost_runs_countdown=i, + lookup_interval=format!("{:#?}", pulse_lookup_interval), + "starting bootstrap boost lookup query" + ); + + lookup(target, &discv5, &metrics).await; + + tokio::time::sleep(pulse_lookup_interval).await; + } + + // initiate regular lookups to populate kbuckets + loop { + // make sure node is connected to each subtree in the network by target + // selection (ref kademlia) + let target = get_lookup_target(kbucket_index, local_node_id); + + trace!(target: "net::discv5", + %target, + 
lookup_interval=format!("{:#?}", lookup_interval), + "starting periodic lookup query" + ); + + lookup(target, &discv5, &metrics).await; + + if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { + // try to populate bucket one step closer + kbucket_index -= 1 + } else { + // start over with bucket furthest away + kbucket_index = MAX_KBUCKET_INDEX + } + + tokio::time::sleep(lookup_interval).await; + } + } + }); +} + /// Gets the next lookup target, based on which bucket is currently being targeted. pub fn get_lookup_target( kbucket_index: usize, @@ -846,7 +847,7 @@ mod tests { let config = Config::builder(TCP_PORT).fork(NetworkStackId::ETH, fork_id).build(); let sk = SecretKey::new(&mut thread_rng()); - let (enr, _, _, _) = Discv5::build_local_enr(&sk, &config); + let (enr, _, _, _) = build_local_enr(&sk, &config); let decoded_fork_id = enr .get_decodable::(NetworkStackId::ETH) From aef1bcc4359c2db80b116cf6198369122b1adebe Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 3 May 2024 12:18:41 +0200 Subject: [PATCH 181/250] chore: make clippy happy (#8068) --- bin/reth/src/commands/db/mod.rs | 2 +- bin/reth/src/utils.rs | 4 ++-- crates/blockchain-tree/src/blockchain_tree.rs | 4 ++-- crates/consensus/auto-seal/src/lib.rs | 2 +- crates/consensus/beacon/src/engine/hooks/controller.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 4 ++-- crates/exex/src/manager.rs | 2 +- crates/metrics/src/common/mpsc.rs | 2 +- crates/net/discv4/src/lib.rs | 4 ++-- crates/net/discv4/src/test_utils.rs | 2 +- crates/net/discv5/src/lib.rs | 6 +++--- crates/net/dns/src/lib.rs | 4 ++-- crates/net/downloaders/src/bodies/bodies.rs | 2 +- crates/net/downloaders/src/file_client.rs | 2 +- crates/net/downloaders/src/headers/reverse_headers.rs | 4 ++-- crates/net/eth-wire/src/multiplex.rs | 2 +- crates/net/eth-wire/src/muxdemux.rs | 2 +- crates/net/network/src/budget.rs | 1 + crates/net/network/src/eth_requests.rs | 6 +++--- crates/net/network/src/fetch/mod.rs | 2 +- 
crates/net/network/src/manager.rs | 2 +- crates/net/network/src/state.rs | 2 +- crates/net/network/src/transactions/fetcher.rs | 6 +++--- crates/node/events/src/node.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 6 +++--- crates/rpc/rpc/src/eth/api/sign.rs | 2 +- crates/stages-api/src/pipeline/mod.rs | 2 +- crates/stages/src/stages/merkle.rs | 2 +- crates/storage/libmdbx-rs/src/lib.rs | 2 +- crates/storage/nippy-jar/src/lib.rs | 10 +++++----- crates/storage/nippy-jar/src/writer.rs | 2 +- .../provider/src/providers/static_file/writer.rs | 2 +- crates/storage/provider/src/test_utils/events.rs | 4 ++-- crates/transaction-pool/src/pool/txpool.rs | 4 ++-- 34 files changed, 54 insertions(+), 53 deletions(-) diff --git a/bin/reth/src/commands/db/mod.rs b/bin/reth/src/commands/db/mod.rs index aeaf1d7e8..6eedabcc7 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/bin/reth/src/commands/db/mod.rs @@ -159,7 +159,7 @@ impl Command { let provider_factory = ProviderFactory::new(db, self.chain.clone(), static_files_path.clone())?; - let mut tool = DbTool::new(provider_factory, self.chain.clone())?; + let tool = DbTool::new(provider_factory, self.chain.clone())?; tool.drop(db_path, static_files_path)?; } Subcommands::Clear(command) => { diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index 5c56476a8..650fc9d70 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -132,7 +132,7 @@ impl DbTool { /// Drops the database and the static files at the given path. pub fn drop( - &mut self, + &self, db_path: impl AsRef, static_files_path: impl AsRef, ) -> Result<()> { @@ -149,7 +149,7 @@ impl DbTool { } /// Drops the provided table from the database. 
- pub fn drop_table(&mut self) -> Result<()> { + pub fn drop_table(&self) -> Result<()> { self.provider_factory.db_ref().update(|tx| tx.clear::())??; Ok(()) } diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index fc9e7685a..b2b30f132 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1216,7 +1216,7 @@ where /// /// The block, `revert_until`, is __non-inclusive__, i.e. `revert_until` stays in the database. fn revert_canonical_from_database( - &mut self, + &self, revert_until: BlockNumber, ) -> Result, CanonicalError> { // read data that is needed for new sidechain @@ -1239,7 +1239,7 @@ where } } - fn update_reorg_metrics(&mut self, reorg_depth: f64) { + fn update_reorg_metrics(&self, reorg_depth: f64) { self.metrics.reorgs.increment(1); self.metrics.latest_reorg_depth.set(reorg_depth); } diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 402a6c983..9f2f2c402 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -338,7 +338,7 @@ impl StorageInner { /// /// This returns the poststate from execution and post-block changes, as well as the gas used. 
pub(crate) fn execute( - &mut self, + &self, block: &BlockWithSenders, executor: &mut EVMProcessor<'_, EvmConfig>, ) -> Result<(BundleStateWithReceipts, u64), BlockExecutionError> diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index 48343d480..47085be00 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs @@ -124,7 +124,7 @@ impl EngineHooksController { } fn poll_next_hook_inner( - &mut self, + &self, cx: &mut Context<'_>, hook: &mut Box, args: EngineHookContext, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f3aa249fa..3e12c5f8e 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -446,7 +446,7 @@ where /// /// Returns `true` if the head needs to be updated. fn on_head_already_canonical( - &mut self, + &self, header: &SealedHeader, attrs: &mut Option, ) -> bool { @@ -804,7 +804,7 @@ where /// This also updates the safe and finalized blocks in the [CanonChainTracker], if they are /// consistent with the head block. fn ensure_consistent_forkchoice_state( - &mut self, + &self, state: ForkchoiceState, ) -> ProviderResult> { // Ensure that the finalized block, if not zero, is known and in the canonical chain diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 1037395b8..1de8c102e 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -258,7 +258,7 @@ impl ExExManager { /// Updates the current buffer capacity and notifies all `is_ready` watchers of the manager's /// readiness to receive notifications. 
- fn update_capacity(&mut self) { + fn update_capacity(&self) { let capacity = self.max_capacity.saturating_sub(self.buffer.len()); self.current_capacity.store(capacity, Ordering::Relaxed); self.metrics.current_capacity.set(capacity as f64); diff --git a/crates/metrics/src/common/mpsc.rs b/crates/metrics/src/common/mpsc.rs index 3c35c745e..98c670ef7 100644 --- a/crates/metrics/src/common/mpsc.rs +++ b/crates/metrics/src/common/mpsc.rs @@ -173,7 +173,7 @@ impl MeteredSender { /// Calls the underlying [Sender](mpsc::Sender)'s `send`, incrementing the appropriate /// metrics depending on the result. - pub async fn send(&mut self, value: T) -> Result<(), SendError> { + pub async fn send(&self, value: T) -> Result<(), SendError> { match self.sender.send(value).await { Ok(()) => { self.metrics.messages_sent.increment(1); diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 1a942a5b9..9a0cb9c11 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -992,7 +992,7 @@ impl Discv4Service { } /// Encodes the packet, sends it and returns the hash. - pub(crate) fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 { + pub(crate) fn send_packet(&self, msg: Message, to: SocketAddr) -> B256 { let (payload, hash) = msg.encode(&self.secret_key); trace!(target: "discv4", r#type=?msg.msg_type(), ?to, ?hash, "sending packet"); let _ = self.egress.try_send((payload, to)).map_err(|err| { @@ -1277,7 +1277,7 @@ impl Discv4Service { /// Handler for incoming `EnrRequest` message fn on_enr_request( - &mut self, + &self, msg: EnrRequest, remote_addr: SocketAddr, id: PeerId, diff --git a/crates/net/discv4/src/test_utils.rs b/crates/net/discv4/src/test_utils.rs index dae3ea388..d4930f204 100644 --- a/crates/net/discv4/src/test_utils.rs +++ b/crates/net/discv4/src/test_utils.rs @@ -114,7 +114,7 @@ impl MockDiscovery { } /// Encodes the packet, sends it and returns the hash. 
- fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 { + fn send_packet(&self, msg: Message, to: SocketAddr) -> B256 { let (payload, hash) = msg.encode(&self.secret_key); let _ = self.egress.try_send((payload, to)); hash diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 8895f8a16..e9bc79dce 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -220,7 +220,7 @@ impl Discv5 { } /// Process an event from the underlying [`discv5::Discv5`] node. - pub fn on_discv5_update(&mut self, update: discv5::Event) -> Option { + pub fn on_discv5_update(&self, update: discv5::Event) -> Option { match update { discv5::Event::SocketUpdated(_) | discv5::Event::TalkRequest(_) | // `Discovered` not unique discovered peers @@ -254,7 +254,7 @@ impl Discv5 { /// Processes a discovered peer. Returns `true` if peer is added to pub fn on_discovered_peer( - &mut self, + &self, enr: &discv5::Enr, socket: SocketAddr, ) -> Option { @@ -724,7 +724,7 @@ mod tests { let remote_key = CombinedKey::generate_secp256k1(); let remote_enr = Enr::builder().tcp4(REMOTE_RLPX_PORT).build(&remote_key).unwrap(); - let mut discv5 = discv5_noop(); + let discv5 = discv5_noop(); // test let filtered_peer = discv5.on_discovered_peer(&remote_enr, remote_socket); diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index e5ddc0fd1..5000e524e 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -67,13 +67,13 @@ pub struct DnsDiscoveryHandle { impl DnsDiscoveryHandle { /// Starts syncing the given link to a tree. - pub fn sync_tree(&mut self, link: &str) -> Result<(), ParseDnsEntryError> { + pub fn sync_tree(&self, link: &str) -> Result<(), ParseDnsEntryError> { self.sync_tree_with_link(link.parse()?); Ok(()) } /// Starts syncing the given link to a tree. 
- pub fn sync_tree_with_link(&mut self, link: LinkEntry) { + pub fn sync_tree_with_link(&self, link: LinkEntry) { let _ = self.to_service.send(DnsDiscoveryCommand::SyncTree(link)); } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 985c545e9..8f97e09c7 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -68,7 +68,7 @@ where Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. - fn next_headers_request(&mut self) -> DownloadResult>> { + fn next_headers_request(&self) -> DownloadResult>> { let start_at = match self.in_progress_queue.last_requested_block_number { Some(num) => num + 1, None => *self.download_range.start(), diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index ee783a1a4..ef72a891b 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -224,7 +224,7 @@ impl FileClient { } /// Returns an iterator over headers in the client. - pub fn headers_iter(&mut self) -> impl Iterator { + pub fn headers_iter(&self) -> impl Iterator { self.headers.values() } diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 273f97e58..a5cdb145b 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -536,7 +536,7 @@ where /// Handles the error of a bad response /// /// This will re-submit the request. 
- fn on_headers_error(&mut self, err: Box) { + fn on_headers_error(&self, err: Box) { let HeadersResponseError { request, peer_id, error } = *err; self.penalize_peer(peer_id, &error); @@ -581,7 +581,7 @@ where } /// Starts a request future - fn submit_request(&mut self, request: HeadersRequest, priority: Priority) { + fn submit_request(&self, request: HeadersRequest, priority: Priority) { trace!(target: "downloaders::headers", ?request, "Submitting headers request"); self.in_progress_queue.push(self.request_fut(request, priority)); self.metrics.in_flight_requests.increment(1.); diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 8677ae77c..82eccd5c8 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -239,7 +239,7 @@ impl MultiplexInner { } /// Delegates a message to the matching protocol. - fn delegate_message(&mut self, cap: &SharedCapability, msg: BytesMut) -> bool { + fn delegate_message(&self, cap: &SharedCapability, msg: BytesMut) -> bool { for proto in &self.protocols { if proto.shared_cap == *cap { proto.send_raw(msg); diff --git a/crates/net/eth-wire/src/muxdemux.rs b/crates/net/eth-wire/src/muxdemux.rs index a9bbe2fdb..18112346e 100644 --- a/crates/net/eth-wire/src/muxdemux.rs +++ b/crates/net/eth-wire/src/muxdemux.rs @@ -171,7 +171,7 @@ impl MuxDemuxStream { /// Checks if all clones of this shared stream have been dropped, if true then returns // /// function to drop the stream. - fn can_drop(&mut self) -> bool { + fn can_drop(&self) -> bool { for tx in self.demux.values() { if !tx.is_closed() { return false diff --git a/crates/net/network/src/budget.rs b/crates/net/network/src/budget.rs index 319c8e311..e20d882fe 100644 --- a/crates/net/network/src/budget.rs +++ b/crates/net/network/src/budget.rs @@ -46,6 +46,7 @@ macro_rules! 
poll_nested_stream_with_budget { loop { match $poll_stream { Poll::Ready(Some(item)) => { + #[allow(unused_mut)] let mut f = $on_ready_some; f(item); diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 57e83391d..3268ff898 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -139,7 +139,7 @@ where } fn on_headers_request( - &mut self, + &self, _peer_id: PeerId, request: GetBlockHeaders, response: oneshot::Sender>, @@ -150,7 +150,7 @@ where } fn on_bodies_request( - &mut self, + &self, _peer_id: PeerId, request: GetBlockBodies, response: oneshot::Sender>, @@ -187,7 +187,7 @@ where } fn on_receipts_request( - &mut self, + &self, _peer_id: PeerId, request: GetReceipts, response: oneshot::Sender>, diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 3a529c97e..9ad50edb0 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -130,7 +130,7 @@ impl StateFetcher { /// Returns the _next_ idle peer that's ready to accept a request, /// prioritizing those with the lowest timeout/latency and those that recently responded with /// adequate data. 
- fn next_best_peer(&mut self) -> Option { + fn next_best_peer(&self) -> Option { let mut idle = self.peers.iter().filter(|(_, peer)| peer.state.is_idle()); let mut best_peer = idle.next()?; diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 39d29ee71..0d2a33408 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -403,7 +403,7 @@ where } /// Handle an incoming request from the peer - fn on_eth_request(&mut self, peer_id: PeerId, req: PeerRequest) { + fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { match req { PeerRequest::GetBlockHeaders { request, response } => { self.delegate_eth_request(IncomingEthRequest::GetBlockHeaders { diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 0020b4927..309184ca3 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -234,7 +234,7 @@ where } /// Invoked when a new [`ForkId`] is activated. - pub(crate) fn update_fork_id(&mut self, fork_id: ForkId) { + pub(crate) fn update_fork_id(&self, fork_id: ForkId) { self.discovery.update_fork_id(fork_id) } diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index e82a20a31..f26b1abe2 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -239,7 +239,7 @@ impl TransactionFetcher { /// /// Returns left over hashes. pub fn pack_request( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: ValidAnnouncementData, ) -> RequestTxHashes { @@ -260,7 +260,7 @@ impl TransactionFetcher { /// response. If no, it's added to surplus hashes. If yes, it's added to hashes to the request /// and expected response size is accumulated. 
pub fn pack_request_eth68( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: impl HandleMempoolData + IntoIterator)>, @@ -328,7 +328,7 @@ impl TransactionFetcher { /// /// Returns left over hashes. pub fn pack_request_eth66( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: ValidAnnouncementData, ) -> RequestTxHashes { diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 2689226ea..ba7ae8da4 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -232,7 +232,7 @@ impl NodeState { } } - fn handle_network_event(&mut self, _: NetworkEvent) { + fn handle_network_event(&self, _: NetworkEvent) { // NOTE(onbjerg): This used to log established/disconnecting sessions, but this is already // logged in the networking component. I kept this stub in case we want to catch other // networking events later on. diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index cea80398e..7d86a0056 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1041,12 +1041,12 @@ where Network: NetworkInfo + Peers + Clone + 'static, { /// Instantiates AdminApi - pub fn admin_api(&mut self) -> AdminApi { + pub fn admin_api(&self) -> AdminApi { AdminApi::new(self.network.clone(), self.provider.chain_spec()) } /// Instantiates Web3Api - pub fn web3_api(&mut self) -> Web3Api { + pub fn web3_api(&self) -> Web3Api { Web3Api::new(self.network.clone()) } @@ -1443,7 +1443,7 @@ where } /// Instantiates RethApi - pub fn reth_api(&mut self) -> RethApi { + pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } } diff --git a/crates/rpc/rpc/src/eth/api/sign.rs b/crates/rpc/rpc/src/eth/api/sign.rs index 66df0e8de..5cbdefa41 100644 --- a/crates/rpc/rpc/src/eth/api/sign.rs +++ b/crates/rpc/rpc/src/eth/api/sign.rs @@ -42,7 +42,7 @@ impl EthApi RethResult<()> { + pub 
fn produce_static_files(&self) -> RethResult<()> { let mut static_file_producer = self.static_file_producer.lock(); let provider = self.provider_factory.provider()?; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 186382e36..77fcf2e15 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -117,7 +117,7 @@ impl MerkleStage { /// Saves the hashing progress pub fn save_execution_checkpoint( - &mut self, + &self, provider: &DatabaseProviderRW, checkpoint: Option, ) -> Result<(), StageError> { diff --git a/crates/storage/libmdbx-rs/src/lib.rs b/crates/storage/libmdbx-rs/src/lib.rs index f8c251208..ba8c6b062 100644 --- a/crates/storage/libmdbx-rs/src/lib.rs +++ b/crates/storage/libmdbx-rs/src/lib.rs @@ -5,7 +5,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![allow(missing_docs)] +#![allow(missing_docs, clippy::needless_pass_by_ref_mut)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub use crate::{ diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 1cecdba40..1abbfba75 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -366,7 +366,7 @@ impl NippyJar { /// Writes all data and configuration to a file and the offset index to another. pub fn freeze( - mut self, + self, columns: Vec>>>, total_rows: u64, ) -> Result { @@ -392,7 +392,7 @@ impl NippyJar { } /// Freezes [`PerfectHashingFunction`], [`InclusionFilter`] and the offset index to file. 
- fn freeze_filters(&mut self) -> Result<(), NippyJarError> { + fn freeze_filters(&self) -> Result<(), NippyJarError> { debug!(target: "nippy-jar", path=?self.index_path(), "Writing offsets and offsets index to file."); let mut file = File::create(self.index_path())?; @@ -405,7 +405,7 @@ impl NippyJar { /// Safety checks before creating and returning a [`File`] handle to write data to. fn check_before_freeze( - &mut self, + &self, columns: &[impl IntoIterator>>], ) -> Result<(), NippyJarError> { if columns.len() != self.columns { @@ -427,7 +427,7 @@ impl NippyJar { } /// Writes all necessary configuration to file. - fn freeze_config(&mut self) -> Result<(), NippyJarError> { + fn freeze_config(&self) -> Result<(), NippyJarError> { Ok(bincode::serialize_into(File::create(self.config_path())?, &self)?) } } @@ -1200,7 +1200,7 @@ mod tests { fn append_two_rows(num_columns: usize, file_path: &Path, col1: &[Vec], col2: &[Vec]) { // Create and add 1 row { - let mut nippy = NippyJar::new_without_header(num_columns, file_path); + let nippy = NippyJar::new_without_header(num_columns, file_path); nippy.freeze_config().unwrap(); assert_eq!(nippy.max_row_size, 0); assert_eq!(nippy.rows, 0); diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index e1f4af10b..6417e6007 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -43,7 +43,7 @@ pub struct NippyJarWriter { impl NippyJarWriter { /// Creates a [`NippyJarWriter`] from [`NippyJar`]. 
- pub fn new(mut jar: NippyJar) -> Result { + pub fn new(jar: NippyJar) -> Result { let (data_file, offsets_file, is_created) = Self::create_or_open_files(jar.data_path(), &jar.offsets_path())?; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index c61736b5e..d1aa8560f 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -225,7 +225,7 @@ impl StaticFileProviderRW { /// Verifies if the incoming block number matches the next expected block number /// for a static file. This ensures data continuity when adding new blocks. fn check_next_block_number( - &mut self, + &self, expected_block_number: u64, segment: StaticFileSegment, ) -> ProviderResult<()> { diff --git a/crates/storage/provider/src/test_utils/events.rs b/crates/storage/provider/src/test_utils/events.rs index 34c426661..baa6bc470 100644 --- a/crates/storage/provider/src/test_utils/events.rs +++ b/crates/storage/provider/src/test_utils/events.rs @@ -12,14 +12,14 @@ pub struct TestCanonStateSubscriptions { impl TestCanonStateSubscriptions { /// Adds new block commit to the queue that can be consumed with /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_commit(&mut self, new: Arc) { + pub fn add_next_commit(&self, new: Arc) { let event = CanonStateNotification::Commit { new }; self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) } /// Adds reorg to the queue that can be consumed with /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_reorg(&mut self, old: Arc, new: Arc) { + pub fn add_next_reorg(&self, old: Arc, new: Arc) { let event = CanonStateNotification::Reorg { old, new }; self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) } diff --git a/crates/transaction-pool/src/pool/txpool.rs 
b/crates/transaction-pool/src/pool/txpool.rs index 44a90f1cf..bcad71edb 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -428,7 +428,7 @@ impl TxPool { } /// Update sub-pools size metrics. - pub(crate) fn update_size_metrics(&mut self) { + pub(crate) fn update_size_metrics(&self) { let stats = self.size(); self.metrics.pending_pool_transactions.set(stats.pending as f64); self.metrics.pending_pool_size_bytes.set(stats.pending_size as f64); @@ -990,7 +990,7 @@ impl AllTransactions { } /// Updates the size metrics - pub(crate) fn update_size_metrics(&mut self) { + pub(crate) fn update_size_metrics(&self) { self.metrics.all_transactions_by_hash.set(self.by_hash.len() as f64); self.metrics.all_transactions_by_id.set(self.txs.len() as f64); } From ec45ae679fec8ca7e21d1c73f38e6c401ae71a7a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 3 May 2024 13:09:55 +0200 Subject: [PATCH 182/250] chore: log only number & hash when skipping payload (#8069) --- crates/node-core/src/engine/skip_new_payload.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/node-core/src/engine/skip_new_payload.rs b/crates/node-core/src/engine/skip_new_payload.rs index ea5cf61e9..fdcb4aeec 100644 --- a/crates/node-core/src/engine/skip_new_payload.rs +++ b/crates/node-core/src/engine/skip_new_payload.rs @@ -44,7 +44,14 @@ where Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) => { if this.skipped < this.threshold { *this.skipped += 1; - tracing::warn!(target: "engine::intercept", ?payload, ?cancun_fields, threshold=this.threshold, skipped=this.skipped, "Skipping new payload"); + tracing::warn!( + target: "engine::intercept", + block_number = payload.block_number(), + block_hash = %payload.block_hash(), + ?cancun_fields, + threshold=this.threshold, + skipped=this.skipped, "Skipping new payload" + ); let _ = tx.send(Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing))); continue } 
else { From 067b0ff420882b31b00629936071ccd11f19e775 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 May 2024 13:39:46 +0200 Subject: [PATCH 183/250] feat: integrate ExecutorProvider (#7798) --- Cargo.lock | 16 +- bin/reth/Cargo.toml | 1 + .../src/commands/debug_cmd/build_block.rs | 41 +- bin/reth/src/commands/debug_cmd/execution.rs | 9 +- .../commands/debug_cmd/in_memory_merkle.rs | 41 +- bin/reth/src/commands/debug_cmd/merkle.rs | 7 +- .../src/commands/debug_cmd/replay_engine.rs | 23 +- bin/reth/src/commands/import.rs | 9 +- bin/reth/src/commands/stage/dump/execution.rs | 19 +- bin/reth/src/commands/stage/dump/merkle.rs | 7 +- bin/reth/src/commands/stage/run.rs | 9 +- bin/reth/src/commands/stage/unwind.rs | 26 +- bin/reth/src/lib.rs | 1 + bin/reth/src/macros.rs | 20 + crates/blockchain-tree/Cargo.toml | 3 + crates/blockchain-tree/src/blockchain_tree.rs | 35 +- crates/blockchain-tree/src/chain.rs | 44 +- crates/blockchain-tree/src/externals.rs | 10 +- crates/blockchain-tree/src/shareable.rs | 27 +- crates/consensus/auto-seal/src/lib.rs | 202 ++-- crates/consensus/auto-seal/src/task.rs | 24 +- crates/consensus/beacon/Cargo.toml | 1 + .../consensus/beacon/src/engine/test_utils.rs | 47 +- crates/ethereum/evm/Cargo.toml | 1 - crates/ethereum/evm/src/execute.rs | 99 +- crates/ethereum/evm/src/lib.rs | 1 + crates/ethereum/evm/src/verify.rs | 53 ++ crates/evm/Cargo.toml | 8 + crates/evm/src/either.rs | 119 +++ crates/evm/src/execute.rs | 142 ++- crates/evm/src/lib.rs | 5 + crates/evm/src/test_utils.rs | 80 ++ crates/exex/src/context.rs | 13 +- crates/node-ethereum/src/evm.rs | 2 + crates/node-ethereum/src/lib.rs | 2 +- crates/node-ethereum/src/node.rs | 14 +- crates/node/api/src/node.rs | 7 + crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/builder/mod.rs | 23 + crates/node/builder/src/builder/states.rs | 5 + crates/node/builder/src/components/builder.rs | 14 +- crates/node/builder/src/components/execute.rs | 19 +- 
crates/node/builder/src/components/mod.rs | 23 +- crates/node/builder/src/launch/mod.rs | 9 +- crates/node/builder/src/setup.rs | 37 +- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/execute.rs | 109 +-- .../mod.rs => optimism/evm/src/l1.rs} | 23 +- crates/optimism/evm/src/lib.rs | 4 + crates/optimism/evm/src/verify.rs | 58 ++ crates/optimism/node/src/node.rs | 15 +- crates/optimism/node/src/txpool.rs | 5 +- crates/payload/optimism/Cargo.toml | 2 + crates/payload/optimism/src/builder.rs | 2 +- crates/revm/Cargo.toml | 5 +- crates/revm/src/factory.rs | 56 -- crates/revm/src/lib.rs | 13 - crates/revm/src/optimism/processor.rs | 401 -------- crates/revm/src/processor.rs | 865 ------------------ crates/rpc/rpc-builder/Cargo.toml | 3 +- crates/rpc/rpc-builder/tests/it/auth.rs | 2 +- crates/rpc/rpc-builder/tests/it/utils.rs | 3 +- crates/rpc/rpc/Cargo.toml | 4 + crates/rpc/rpc/src/eth/api/block.rs | 2 +- crates/rpc/rpc/src/eth/api/transactions.rs | 26 +- crates/stages/Cargo.toml | 2 + crates/stages/src/lib.rs | 24 +- crates/stages/src/sets.rs | 72 +- crates/stages/src/stages/execution.rs | 60 +- crates/stages/src/stages/mod.rs | 10 +- .../bundle_state_with_receipts.rs | 17 + examples/custom-evm/src/main.rs | 17 +- testing/ef-tests/Cargo.toml | 6 +- testing/ef-tests/src/cases/blockchain_test.rs | 10 +- 74 files changed, 1088 insertions(+), 2028 deletions(-) create mode 100644 bin/reth/src/macros.rs create mode 100644 crates/ethereum/evm/src/verify.rs create mode 100644 crates/evm/src/either.rs create mode 100644 crates/evm/src/test_utils.rs rename crates/{revm/src/optimism/mod.rs => optimism/evm/src/l1.rs} (97%) create mode 100644 crates/optimism/evm/src/verify.rs delete mode 100644 crates/revm/src/factory.rs delete mode 100644 crates/revm/src/optimism/processor.rs delete mode 100644 crates/revm/src/processor.rs diff --git a/Cargo.lock b/Cargo.lock index 24b07b8ab..b2e179a95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2670,8 +2670,8 @@ dependencies = [ 
"alloy-rlp", "rayon", "reth-db", + "reth-evm-ethereum", "reth-interfaces", - "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", @@ -6393,6 +6393,7 @@ dependencies = [ "reth-discv4", "reth-downloaders", "reth-ethereum-payload-builder", + "reth-evm", "reth-exex", "reth-interfaces", "reth-network", @@ -6490,6 +6491,7 @@ dependencies = [ "reth-downloaders", "reth-engine-primitives", "reth-ethereum-engine-primitives", + "reth-evm", "reth-evm-ethereum", "reth-interfaces", "reth-metrics", @@ -6537,6 +6539,7 @@ dependencies = [ "parking_lot 0.12.2", "reth-consensus", "reth-db", + "reth-evm", "reth-evm-ethereum", "reth-interfaces", "reth-metrics", @@ -6954,6 +6957,8 @@ dependencies = [ name = "reth-evm" version = "0.2.0-beta.6" dependencies = [ + "futures-util", + "parking_lot 0.12.2", "reth-interfaces", "reth-primitives", "revm", @@ -6967,7 +6972,6 @@ dependencies = [ "reth-evm", "reth-interfaces", "reth-primitives", - "reth-provider", "reth-revm", "revm-primitives", "tracing", @@ -6982,6 +6986,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm", + "revm", "revm-primitives", "tracing", ] @@ -7262,6 +7267,7 @@ dependencies = [ "reth-consensus", "reth-db", "reth-downloaders", + "reth-evm", "reth-exex", "reth-interfaces", "reth-network", @@ -7457,6 +7463,7 @@ dependencies = [ "reth-basic-payload-builder", "reth-engine-primitives", "reth-evm", + "reth-evm-optimism", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7645,6 +7652,7 @@ dependencies = [ "reth-consensus-common", "reth-evm", "reth-evm-ethereum", + "reth-evm-optimism", "reth-interfaces", "reth-metrics", "reth-network-api", @@ -7711,12 +7719,13 @@ dependencies = [ "pin-project", "reth-beacon-consensus", "reth-engine-primitives", + "reth-ethereum-engine-primitives", "reth-evm", + "reth-evm-ethereum", "reth-interfaces", "reth-ipc", "reth-metrics", "reth-network-api", - "reth-node-ethereum", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7826,6 +7835,7 
@@ dependencies = [ "reth-db", "reth-downloaders", "reth-etl", + "reth-evm", "reth-evm-ethereum", "reth-exex", "reth-interfaces", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index c323017d0..3f5d78834 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -19,6 +19,7 @@ reth-primitives = { workspace = true, features = ["arbitrary", "clap"] } reth-db = { workspace = true, features = ["mdbx"] } reth-exex.workspace = true reth-provider = { workspace = true } +reth-evm.workspace = true reth-revm.workspace = true reth-stages.workspace = true reth-interfaces = { workspace = true, features = ["clap"] } diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 9d5942ae1..22361aada 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -6,6 +6,7 @@ use crate::{ DatabaseArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, }; use alloy_rlp::Decodable; use clap::Parser; @@ -20,10 +21,9 @@ use reth_blockchain_tree::{ use reth_cli_runner::CliContext; use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_interfaces::RethResult; use reth_node_api::PayloadBuilderAttributes; -#[cfg(not(feature = "optimism"))] -use reth_node_ethereum::EthEvmConfig; use reth_payload_builder::database::CachedReads; use reth_primitives::{ constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP}, @@ -31,13 +31,14 @@ use reth_primitives::{ revm_primitives::KzgSettings, stage::StageId, Address, BlobTransaction, BlobTransactionSidecar, Bytes, ChainSpec, PooledTransactionsElement, - SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, U256, + Receipts, SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, + U256, }; use reth_provider::{ - providers::BlockchainProvider, 
BlockHashReader, BlockReader, BlockWriter, ExecutorFactory, - ProviderFactory, StageCheckpointReader, StateProviderFactory, + providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, + BundleStateWithReceipts, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::EvmProcessorFactory; +use reth_revm::database::StateProviderDatabase; #[cfg(feature = "optimism")] use reth_rpc_types::engine::OptimismPayloadAttributes; use reth_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; @@ -161,18 +162,11 @@ impl Command { let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); - #[cfg(feature = "optimism")] - let evm_config = reth_node_optimism::OptimismEvmConfig::default(); - - #[cfg(not(feature = "optimism"))] - let evm_config = EthEvmConfig::default(); + let executor = block_executor!(self.chain.clone()); // configure blockchain tree - let tree_externals = TreeExternals::new( - provider_factory.clone(), - Arc::clone(&consensus), - EvmProcessorFactory::new(self.chain.clone(), evm_config), - ); + let tree_externals = + TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); @@ -309,11 +303,16 @@ impl Command { let block_with_senders = SealedBlockWithSenders::new(block.clone(), senders).unwrap(); - let executor_factory = EvmProcessorFactory::new(self.chain.clone(), evm_config); - let mut executor = executor_factory.with_state(blockchain_db.latest()?); - executor - .execute_and_verify_receipt(&block_with_senders.clone().unseal(), U256::MAX)?; - let state = executor.take_output_state(); + let db = StateProviderDatabase::new(blockchain_db.latest()?); + let executor = block_executor!(self.chain.clone()).executor(db); + + let BlockExecutionOutput { state, receipts, .. 
} = + executor.execute((&block_with_senders.clone().unseal(), U256::MAX).into())?; + let state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); debug!(target: "reth::cli", ?state, "Executed block"); let hashed_state = state.hash_state_slow(); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index a83ea19fd..33b07368a 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -7,6 +7,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::get_single_header, }; use clap::Parser; @@ -25,7 +26,6 @@ use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::Header use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_core::init::init_genesis; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, }; @@ -111,8 +111,7 @@ impl Command { let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); + let executor = block_executor!(self.chain.clone()); let header_mode = HeaderSyncMode::Tip(tip_rx); let pipeline = Pipeline::builder() @@ -124,14 +123,14 @@ impl Command { Arc::clone(&consensus), header_downloader, body_downloader, - factory.clone(), + executor.clone(), stage_conf.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: stage_conf.sender_recovery.commit_threshold, }) .set(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: None, max_changes: None, diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index e68231a76..008530c53 100644 --- 
a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -7,6 +7,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::{get_single_body, get_single_header}, }; use backon::{ConstantBuilder, Retryable}; @@ -14,16 +15,17 @@ use clap::Parser; use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{init_db, DatabaseEnv}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_interfaces::executor::BlockValidationError; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_ethereum::EthEvmConfig; -use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec}; +use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec, Receipts}; use reth_provider::{ - AccountExtReader, ExecutorFactory, HashingWriter, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StaticFileProviderFactory, - StorageReader, + AccountExtReader, BundleStateWithReceipts, HashingWriter, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, StorageReader, }; +use reth_revm::database::StateProviderDatabase; use reth_tasks::TaskExecutor; use reth_trie::{updates::TrieKey, StateRoot}; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; @@ -162,24 +164,31 @@ impl Command { ) .await?; - let executor_factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); - let mut executor = executor_factory.with_state(LatestStateProviderRef::new( + let db = StateProviderDatabase::new(LatestStateProviderRef::new( provider.tx_ref(), factory.static_file_provider(), )); + let executor = block_executor!(self.chain.clone()).executor(db); + let merkle_block_td = provider.header_td_by_number(merkle_block_number)?.unwrap_or_default(); - 
executor.execute_and_verify_receipt( - &block - .clone() - .unseal() - .with_recovered_senders() - .ok_or(BlockValidationError::SenderRecoveryError)?, - merkle_block_td + block.difficulty, + let BlockExecutionOutput { state, receipts, .. } = executor.execute( + ( + &block + .clone() + .unseal() + .with_recovered_senders() + .ok_or(BlockValidationError::SenderRecoveryError)?, + merkle_block_td + block.difficulty, + ) + .into(), )?; - let block_state = executor.take_output_state(); + let block_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); // Unpacked `BundleState::state_root_slow` function let (in_memory_state_root, in_memory_updates) = diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 07075ff26..6d895fccf 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -7,6 +7,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::get_single_header, }; use backon::{ConstantBuilder, Retryable}; @@ -20,7 +21,6 @@ use reth_exex::ExExManagerHandle; use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ fs, stage::{StageCheckpoint, StageId}, @@ -201,10 +201,9 @@ impl Command { checkpoint.stage_checkpoint.is_some() }); - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); + let executor = block_executor!(self.chain.clone()); let mut execution_stage = ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: Some(1), max_changes: None, diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 947c12745..da2e458be 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ 
b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -5,6 +5,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, }; use clap::Parser; use eyre::Context; @@ -20,15 +21,12 @@ use reth_db::{init_db, DatabaseEnv}; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_node_core::engine::engine_store::{EngineMessageStore, StoredEngineApiMessage}; -#[cfg(not(feature = "optimism"))] -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{fs, ChainSpec, PruneModes}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory, StaticFileProviderFactory, }; -use reth_revm::EvmProcessorFactory; use reth_stages::Pipeline; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; @@ -126,18 +124,11 @@ impl Command { let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); - #[cfg(not(feature = "optimism"))] - let evm_config = EthEvmConfig::default(); - - #[cfg(feature = "optimism")] - let evm_config = reth_node_optimism::OptimismEvmConfig::default(); + let executor = block_executor!(self.chain.clone()); // Configure blockchain tree - let tree_externals = TreeExternals::new( - provider_factory.clone(), - Arc::clone(&consensus), - EvmProcessorFactory::new(self.chain.clone(), evm_config), - ); + let tree_externals = + TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); @@ -184,8 +175,10 @@ impl Command { ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); #[cfg(not(feature = "optimism"))] - let (payload_service, payload_builder): (_, PayloadBuilderHandle) = - PayloadBuilderService::new(payload_generator, 
blockchain_db.canonical_state_stream()); + let (payload_service, payload_builder): ( + _, + PayloadBuilderHandle, + ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); ctx.task_executor.spawn_critical("payload builder service", payload_service); diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 3c191d8bb..0d5b24275 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -6,6 +6,7 @@ use crate::{ DatabaseArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, version::SHORT_VERSION, }; use clap::Parser; @@ -26,7 +27,6 @@ use reth_interfaces::p2p::{ headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_node_core::init::init_genesis; -use reth_node_ethereum::EthEvmConfig; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{ @@ -269,8 +269,7 @@ where .expect("failed to set download range"); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = - reth_revm::EvmProcessorFactory::new(provider_factory.chain_spec(), EthEvmConfig::default()); + let executor = block_executor!(provider_factory.chain_spec()); let max_block = file_client.max_block().unwrap_or(0); @@ -285,14 +284,14 @@ where consensus.clone(), header_downloader, body_downloader, - factory.clone(), + executor.clone(), config.stages.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: config.stages.sender_recovery.commit_threshold, }) .set(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: config.stages.execution.max_blocks, max_changes: config.stages.execution.max_changes, diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/bin/reth/src/commands/stage/dump/execution.rs index 571ce486a..d8f12b50a 100644 --- a/bin/reth/src/commands/stage/dump/execution.rs +++ b/bin/reth/src/commands/stage/dump/execution.rs @@ -1,15 +1,12 @@ use super::setup; -use 
crate::utils::DbTool; -use eyre::Result; +use crate::{macros::block_executor, utils::DbTool}; use reth_db::{ cursor::DbCursorRO, database::Database, table::TableImporter, tables, transaction::DbTx, DatabaseEnv, }; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::stage::StageCheckpoint; use reth_provider::{ChainSpecProvider, ProviderFactory}; -use reth_revm::EvmProcessorFactory; use reth_stages::{stages::ExecutionStage, Stage, UnwindInput}; use tracing::info; @@ -19,7 +16,7 @@ pub(crate) async fn dump_execution_stage( to: u64, output_datadir: ChainPath, should_run: bool, -) -> Result<()> { +) -> eyre::Result<()> { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; import_tables_with_range(&output_db, db_tool, from, to)?; @@ -127,10 +124,8 @@ async fn unwind_and_copy( ) -> eyre::Result<()> { let provider = db_tool.provider_factory.provider_rw()?; - let mut exec_stage = ExecutionStage::new_with_factory(EvmProcessorFactory::new( - db_tool.chain.clone(), - EthEvmConfig::default(), - )); + let executor = block_executor!(db_tool.chain.clone()); + let mut exec_stage = ExecutionStage::new_with_executor(executor); exec_stage.unwind( &provider, @@ -159,10 +154,8 @@ async fn dry_run( ) -> eyre::Result<()> { info!(target: "reth::cli", "Executing stage. 
[dry-run]"); - let mut exec_stage = ExecutionStage::new_with_factory(EvmProcessorFactory::new( - output_provider_factory.chain_spec(), - EthEvmConfig::default(), - )); + let executor = block_executor!(output_provider_factory.chain_spec()); + let mut exec_stage = ExecutionStage::new_with_executor(executor); let input = reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) }; diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs index 55a8ec76d..9b421be7c 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/bin/reth/src/commands/stage/dump/merkle.rs @@ -1,11 +1,10 @@ use super::setup; -use crate::utils::DbTool; +use crate::{macros::block_executor, utils::DbTool}; use eyre::Result; use reth_config::config::EtlConfig; use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv}; use reth_exex::ExExManagerHandle; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{stage::StageCheckpoint, BlockNumber, PruneModes}; use reth_provider::ProviderFactory; use reth_stages::{ @@ -81,9 +80,11 @@ async fn unwind_and_copy( MerkleStage::default_unwind().unwind(&provider, unwind)?; + let executor = block_executor!(db_tool.chain.clone()); + // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - reth_revm::EvmProcessorFactory::new(db_tool.chain.clone(), EthEvmConfig::default()), + executor, ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index d798c87d1..562b7e1b3 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -9,6 +9,7 @@ use crate::{ DatabaseArgs, NetworkArgs, StageEnum, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, prometheus_exporter, version::SHORT_VERSION, }; @@ -19,7 +20,6 @@ use 
reth_config::{config::EtlConfig, Config}; use reth_db::init_db; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; use reth_exex::ExExManagerHandle; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::ChainSpec; use reth_provider::{ ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, @@ -224,13 +224,10 @@ impl Command { } StageEnum::Senders => (Box::new(SenderRecoveryStage::new(batch_size)), None), StageEnum::Execution => { - let factory = reth_revm::EvmProcessorFactory::new( - self.chain.clone(), - EthEvmConfig::default(), - ); + let executor = block_executor!(self.chain.clone()); ( Box::new(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: Some(batch_size), max_changes: None, diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 0c4260c0c..c6dea1a05 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -1,12 +1,5 @@ //! 
Unwinding a certain block range -use crate::{ - args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, - }, - dirs::{DataDirPath, MaybePlatformPath}, -}; use clap::{Parser, Subcommand}; use reth_beacon_consensus::BeaconConsensus; use reth_config::{Config, PruneConfig}; @@ -21,7 +14,6 @@ use reth_node_core::{ args::{get_secret_key, NetworkArgs}, dirs::ChainPath, }; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{BlockHashOrNumber, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, @@ -42,6 +34,15 @@ use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::watch; use tracing::info; +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, +}; + /// `reth stage unwind` command #[derive(Debug, Parser)] pub struct Command { @@ -178,10 +179,7 @@ impl Command { let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = reth_revm::EvmProcessorFactory::new( - provider_factory.chain_spec(), - EthEvmConfig::default(), - ); + let executor = block_executor!(provider_factory.chain_spec()); let header_mode = HeaderSyncMode::Tip(tip_rx); let pipeline = Pipeline::builder() @@ -193,14 +191,14 @@ impl Command { Arc::clone(&consensus), header_downloader, body_downloader, - factory.clone(), + executor.clone(), stage_conf.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: stage_conf.sender_recovery.commit_threshold, }) .set(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: None, max_changes: None, diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 42f26115c..9dd43bcd2 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -31,6 +31,7 @@ pub mod cli; pub mod commands; +mod macros; pub mod utils; /// Re-exported payload related types diff 
--git a/bin/reth/src/macros.rs b/bin/reth/src/macros.rs new file mode 100644 index 000000000..7ff81a0f9 --- /dev/null +++ b/bin/reth/src/macros.rs @@ -0,0 +1,20 @@ +//! Helper macros + +/// Creates the block executor type based on the configured feature. +/// +/// Note(mattsse): This is incredibly horrible and will be replaced +#[cfg(not(feature = "optimism"))] +macro_rules! block_executor { + ($chain_spec:expr) => { + reth_node_ethereum::EthExecutorProvider::ethereum($chain_spec) + }; +} + +#[cfg(feature = "optimism")] +macro_rules! block_executor { + ($chain_spec:expr) => { + reth_node_optimism::OpExecutorProvider::optimism($chain_spec) + }; +} + +pub(crate) use block_executor; diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index ecb2e4ef3..912f593dc 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -15,6 +15,8 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-db.workspace = true +reth-evm.workspace = true +reth-revm.workspace = true reth-provider.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } @@ -40,6 +42,7 @@ reth-db = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true , features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-evm = { workspace = true, features = ["test-utils"] } reth-revm.workspace = true reth-evm-ethereum.workspace = true parking_lot.workspace = true diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index b2b30f132..5346eafbd 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -7,6 +7,7 @@ use crate::{ }; use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; +use 
reth_evm::execute::BlockExecutorProvider; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, @@ -24,7 +25,7 @@ use reth_provider::{ chain::{ChainSplit, ChainSplitTarget}, BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, - ChainSpecProvider, DisplayBlocksChain, ExecutorFactory, HeaderProvider, ProviderError, + ChainSpecProvider, DisplayBlocksChain, HeaderProvider, ProviderError, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use std::{ @@ -57,13 +58,13 @@ use tracing::{debug, error, info, instrument, trace, warn}; /// * [BlockchainTree::make_canonical]: Check if we have the hash of a block that is the current /// canonical head and commit it to db. #[derive(Debug)] -pub struct BlockchainTree { +pub struct BlockchainTree { /// The state of the tree /// /// Tracks all the chains, the block indices, and the block buffer. state: TreeState, /// External components (the database, consensus engine etc.) - externals: TreeExternals, + externals: TreeExternals, /// Tree configuration config: BlockchainTreeConfig, /// Broadcast channel for canon state changes notifications. @@ -75,7 +76,7 @@ pub struct BlockchainTree { prune_modes: Option, } -impl BlockchainTree { +impl BlockchainTree { /// Subscribe to new blocks events. /// /// Note: Only canonical blocks are emitted by the tree. @@ -89,10 +90,10 @@ impl BlockchainTree { } } -impl BlockchainTree +impl BlockchainTree where DB: Database + Clone, - EVM: ExecutorFactory, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// @@ -115,7 +116,7 @@ where /// storage space efficiently. It's important to validate this configuration to ensure it does /// not lead to unintended data loss. 
pub fn new( - externals: TreeExternals, + externals: TreeExternals, config: BlockchainTreeConfig, prune_modes: Option, ) -> RethResult { @@ -1273,7 +1274,8 @@ mod tests { use linked_hash_set::LinkedHashSet; use reth_consensus::test_utils::TestConsensus; use reth_db::{tables, test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv}; - use reth_evm_ethereum::EthEvmConfig; + use reth_evm::test_utils::MockExecutorProvider; + use reth_evm_ethereum::execute::EthExecutorProvider; #[cfg(not(feature = "optimism"))] use reth_primitives::proofs::calculate_receipt_root; #[cfg(feature = "optimism")] @@ -1289,19 +1291,15 @@ mod tests { MAINNET, }; use reth_provider::{ - test_utils::{ - blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, - TestExecutorFactory, - }, + test_utils::{blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec}, ProviderFactory, }; - use reth_revm::EvmProcessorFactory; use reth_trie::StateRoot; use std::collections::HashMap; fn setup_externals( exec_res: Vec, - ) -> TreeExternals>, TestExecutorFactory> { + ) -> TreeExternals>, MockExecutorProvider> { let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) @@ -1311,7 +1309,7 @@ mod tests { ); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); let consensus = Arc::new(TestConsensus::default()); - let executor_factory = TestExecutorFactory::default(); + let executor_factory = MockExecutorProvider::default(); executor_factory.extend(exec_res); TreeExternals::new(provider_factory, consensus, executor_factory) @@ -1395,7 +1393,7 @@ mod tests { self } - fn assert(self, tree: &BlockchainTree) { + fn assert(self, tree: &BlockchainTree) { if let Some(chain_num) = self.chain_num { assert_eq!(tree.state.chains.len(), chain_num); } @@ -1439,8 +1437,7 @@ mod tests { ); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); let consensus = Arc::new(TestConsensus::default()); - let 
executor_factory = - EvmProcessorFactory::new(chain_spec.clone(), EthEvmConfig::default()); + let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone()); { let provider_rw = provider_factory.provider_rw().unwrap(); @@ -1548,7 +1545,7 @@ mod tests { mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3); let mut tree = BlockchainTree::new( - TreeExternals::new(provider_factory, consensus, executor_factory), + TreeExternals::new(provider_factory, consensus, executor_provider), BlockchainTreeConfig::default(), None, ) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index c091b800a..637ea52e7 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -7,6 +7,7 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, InsertBlockErrorKind}, @@ -15,13 +16,14 @@ use reth_interfaces::{ RethResult, }; use reth_primitives::{ - BlockHash, BlockNumber, ForkBlock, GotExpected, SealedBlockWithSenders, SealedHeader, U256, + BlockHash, BlockNumber, ForkBlock, GotExpected, Receipts, SealedBlockWithSenders, SealedHeader, + U256, }; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView}, - BundleStateDataProvider, BundleStateWithReceipts, Chain, ExecutorFactory, ProviderError, - StateRootProvider, + BundleStateDataProvider, BundleStateWithReceipts, Chain, ProviderError, StateRootProvider, }; +use reth_revm::database::StateProviderDatabase; use reth_trie::updates::TrieUpdates; use reth_trie_parallel::parallel_root::ParallelStateRoot; use std::{ @@ -66,18 +68,18 @@ impl AppendableChain { /// /// if [BlockValidationKind::Exhaustive] is specified, the method will verify the state root of /// the block. 
- pub fn new_canonical_fork( + pub fn new_canonical_fork( block: SealedBlockWithSenders, parent_header: &SealedHeader, canonical_block_hashes: &BTreeMap, canonical_fork: ForkBlock, - externals: &TreeExternals, + externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> Result where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { let state = BundleStateWithReceipts::default(); let empty = BTreeMap::new(); @@ -104,18 +106,18 @@ impl AppendableChain { /// Create a new chain that forks off of an existing sidechain. /// /// This differs from [AppendableChain::new_canonical_fork] in that this starts a new fork. - pub(crate) fn new_chain_fork( + pub(crate) fn new_chain_fork( &self, block: SealedBlockWithSenders, side_chain_block_hashes: BTreeMap, canonical_block_hashes: &BTreeMap, canonical_fork: ForkBlock, - externals: &TreeExternals, + externals: &TreeExternals, block_validation_kind: BlockValidationKind, ) -> Result where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { let parent_number = block.number - 1; let parent = self.blocks().get(&parent_number).ok_or( @@ -166,18 +168,18 @@ impl AppendableChain { /// - [BlockAttachment] represents if the block extends the canonical chain, and thus we can /// cache the trie state updates. /// - [BlockValidationKind] determines if the state root __should__ be validated. - fn validate_and_execute( + fn validate_and_execute( block: SealedBlockWithSenders, parent_block: &SealedHeader, bundle_state_data_provider: BSDP, - externals: &TreeExternals, + externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> RethResult<(BundleStateWithReceipts, Option)> where BSDP: BundleStateDataProvider, DB: Database + Clone, - EVM: ExecutorFactory, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. 
externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -203,11 +205,17 @@ impl AppendableChain { let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); - let mut executor = externals.executor_factory.with_state(&provider); + let db = StateProviderDatabase::new(&provider); + let executor = externals.executor_factory.executor(db); let block_hash = block.hash(); let block = block.unseal(); - executor.execute_and_verify_receipt(&block, U256::MAX)?; - let bundle_state = executor.take_output_state(); + let state = executor.execute((&block, U256::MAX).into())?; + let BlockExecutionOutput { state, receipts, .. } = state; + let bundle_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); // check state root if the block extends the canonical chain __and__ if state root // validation was requested. @@ -259,19 +267,19 @@ impl AppendableChain { /// __not__ the canonical head. #[track_caller] #[allow(clippy::too_many_arguments)] - pub(crate) fn append_block( + pub(crate) fn append_block( &mut self, block: SealedBlockWithSenders, side_chain_block_hashes: BTreeMap, canonical_block_hashes: &BTreeMap, - externals: &TreeExternals, + externals: &TreeExternals, canonical_fork: ForkBlock, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> Result<(), InsertBlockErrorKind> where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 36f304173..a311281c9 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -19,27 +19,27 @@ use std::{collections::BTreeMap, sync::Arc}; /// - The executor factory to execute blocks with /// - The chain spec #[derive(Debug)] -pub struct TreeExternals { +pub struct TreeExternals { /// The provider factory, used to 
commit the canonical chain, or unwind it. pub(crate) provider_factory: ProviderFactory, /// The consensus engine. pub(crate) consensus: Arc, /// The executor factory to execute blocks with. - pub(crate) executor_factory: EVM, + pub(crate) executor_factory: E, } -impl TreeExternals { +impl TreeExternals { /// Create new tree externals. pub fn new( provider_factory: ProviderFactory, consensus: Arc, - executor_factory: EVM, + executor_factory: E, ) -> Self { Self { provider_factory, consensus, executor_factory } } } -impl TreeExternals { +impl TreeExternals { /// Fetches the latest canonical block hashes by walking backwards from the head. /// /// Returns the hashes sorted by increasing block numbers diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 7a0eb36fa..061b49f4c 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -3,6 +3,7 @@ use super::BlockchainTree; use parking_lot::RwLock; use reth_db::database::Database; +use reth_evm::execute::BlockExecutorProvider; use reth_interfaces::{ blockchain_tree::{ error::{CanonicalError, InsertBlockError}, @@ -17,7 +18,7 @@ use reth_primitives::{ }; use reth_provider::{ BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonStateSubscriptions, - ExecutorFactory, ProviderError, + ProviderError, }; use std::{ collections::{BTreeMap, HashSet}, @@ -27,22 +28,22 @@ use tracing::trace; /// Shareable blockchain tree that is behind a RwLock #[derive(Clone, Debug)] -pub struct ShareableBlockchainTree { +pub struct ShareableBlockchainTree { /// BlockchainTree - pub tree: Arc>>, + pub tree: Arc>>, } -impl ShareableBlockchainTree { +impl ShareableBlockchainTree { /// Create a new shareable database. 
- pub fn new(tree: BlockchainTree) -> Self { + pub fn new(tree: BlockchainTree) -> Self { Self { tree: Arc::new(RwLock::new(tree)) } } } -impl BlockchainTreeEngine for ShareableBlockchainTree +impl BlockchainTreeEngine for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -99,10 +100,10 @@ where } } -impl BlockchainTreeViewer for ShareableBlockchainTree +impl BlockchainTreeViewer for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn blocks(&self) -> BTreeMap> { trace!(target: "blockchain_tree", "Returning all blocks in blockchain tree"); @@ -181,10 +182,10 @@ where } } -impl BlockchainTreePendingStateProvider for ShareableBlockchainTree +impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, @@ -196,10 +197,10 @@ where } } -impl CanonStateSubscriptions for ShareableBlockchainTree +impl CanonStateSubscriptions for ShareableBlockchainTree where DB: Send + Sync, - EF: Send + Sync, + E: Send + Sync, { fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 9f2f2c402..e954108c8 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -18,23 +18,18 @@ use reth_beacon_consensus::BeaconEngineMessage; use reth_consensus::{Consensus, ConsensusError}; use reth_engine_primitives::EngineTypes; -use reth_evm::ConfigureEvm; use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ - constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, 
ETHEREUM_BLOCK_GAS_LIMIT}, + constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, - proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Bloom, - ChainSpec, Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, - B256, U256, + proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Header, + Receipts, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, B256, U256, }; use reth_provider::{ - BlockExecutor, BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, - StateProviderFactory, -}; -use reth_revm::{ - database::StateProviderDatabase, db::states::bundle_state::BundleRetention, - processor::EVMProcessor, State, + BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, StateProviderFactory, + StateRootProvider, }; +use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::TransactionPool; use std::{ collections::HashMap, @@ -50,6 +45,7 @@ mod task; pub use crate::client::AutoSealClient; pub use mode::{FixedBlockTimeMiner, MiningMode, ReadyTransactionMiner}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; pub use task::MiningTask; /// A consensus implementation intended for local development and testing purposes. 
@@ -281,6 +277,18 @@ impl StorageInner { parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) }); + let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(timestamp) { + let mut sum_blob_gas_used = 0; + for tx in transactions { + if let Some(blob_tx) = tx.transaction.as_eip4844() { + sum_blob_gas_used += blob_tx.blob_gas(); + } + } + Some(sum_blob_gas_used) + } else { + None + }; + let mut header = Header { parent_hash: self.best_hash, ommers_hash: proofs::calculate_ommers_root(ommers), @@ -298,7 +306,7 @@ impl StorageInner { mix_hash: Default::default(), nonce: 0, base_fee_per_gas, - blob_gas_used: None, + blob_gas_used, excess_blob_gas: None, extra_data: Default::default(), parent_beacon_block_root: None, @@ -334,111 +342,26 @@ impl StorageInner { header } - /// Executes the block with the given block and senders, on the provided [EVMProcessor]. - /// - /// This returns the poststate from execution and post-block changes, as well as the gas used. - pub(crate) fn execute( - &self, - block: &BlockWithSenders, - executor: &mut EVMProcessor<'_, EvmConfig>, - ) -> Result<(BundleStateWithReceipts, u64), BlockExecutionError> - where - EvmConfig: ConfigureEvm, - { - trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - // TODO: there isn't really a parent beacon block root here, so not sure whether or not to - // call the 4788 beacon contract - - // set the first block to find the correct index in bundle state - executor.set_first_block(block.number); - - let (receipts, gas_used) = executor.execute_transactions(block, U256::ZERO)?; - - // Save receipts. - executor.save_receipts(receipts)?; - - // add post execution state change - // Withdrawals, rewards etc. 
- executor.apply_post_execution_state_change(block, U256::ZERO)?; - - // merge transitions - executor.db_mut().merge_transitions(BundleRetention::Reverts); - - // apply post block changes - Ok((executor.take_output_state(), gas_used)) - } - - /// Fills in the post-execution header fields based on the given BundleState and gas used. - /// In doing this, the state root is calculated and the final header is returned. - pub(crate) fn complete_header( - &self, - mut header: Header, - bundle_state: &BundleStateWithReceipts, - client: &S, - gas_used: u64, - blob_gas_used: Option, - #[cfg(feature = "optimism")] chain_spec: &ChainSpec, - ) -> Result { - let receipts = bundle_state.receipts_by_block(header.number); - header.receipts_root = if receipts.is_empty() { - EMPTY_RECEIPTS - } else { - let receipts_with_bloom = receipts - .iter() - .map(|r| (*r).clone().expect("receipts have not been pruned").into()) - .collect::>(); - header.logs_bloom = - receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - #[cfg(feature = "optimism")] - { - proofs::calculate_receipt_root_optimism( - &receipts_with_bloom, - chain_spec, - header.timestamp, - ) - } - #[cfg(not(feature = "optimism"))] - { - proofs::calculate_receipt_root(&receipts_with_bloom) - } - }; - - header.gas_used = gas_used; - header.blob_gas_used = blob_gas_used; - - // calculate the state root - let state_root = client - .latest() - .map_err(BlockExecutionError::LatestBlock)? - .state_root(bundle_state.state()) - .unwrap(); - header.state_root = state_root; - Ok(header) - } - - /// Builds and executes a new block with the given transactions, on the provided [EVMProcessor]. + /// Builds and executes a new block with the given transactions, on the provided executor. /// /// This returns the header of the executed block, as well as the poststate from execution. - pub(crate) fn build_and_execute( + pub(crate) fn build_and_execute( &mut self, transactions: Vec, ommers: Vec
, withdrawals: Option, - client: &impl StateProviderFactory, + provider: &Provider, chain_spec: Arc, - evm_config: &EvmConfig, + executor: &Executor, ) -> Result<(SealedHeader, BundleStateWithReceipts), BlockExecutionError> where - EvmConfig: ConfigureEvm, + Executor: BlockExecutorProvider, + Provider: StateProviderFactory, { - let header = self.build_header_template( - &transactions, - &ommers, - withdrawals.as_ref(), - chain_spec.clone(), - ); + let header = + self.build_header_template(&transactions, &ommers, withdrawals.as_ref(), chain_spec); - let block = Block { + let mut block = Block { header, body: transactions, ommers: ommers.clone(), @@ -449,45 +372,46 @@ impl StorageInner { trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - // now execute the block - let db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new( - client.latest().map_err(BlockExecutionError::LatestBlock)?, - ))) - .with_bundle_update() - .build(); - let mut executor = EVMProcessor::new_with_state(chain_spec.clone(), db, evm_config); + let mut db = StateProviderDatabase::new( + provider.latest().map_err(BlockExecutionError::LatestBlock)?, + ); - let (bundle_state, gas_used) = self.execute(&block, &mut executor)?; + // TODO(mattsse): At this point we don't know certain fields of the header, so we first + // execute it and then update the header this can be improved by changing the executor + // input, for now we intercept the errors and retry + loop { + match executor.executor(&mut db).execute((&block, U256::ZERO).into()) { + Err(BlockExecutionError::Validation(BlockValidationError::BlockGasUsed { + gas, + .. + })) => { + block.block.header.gas_used = gas.got; + } + Err(BlockExecutionError::Validation(BlockValidationError::ReceiptRootDiff( + err, + ))) => { + block.block.header.receipts_root = err.got; + } + _ => break, + }; + } - let Block { header, body, .. 
} = block.block; - let body = BlockBody { transactions: body, ommers, withdrawals }; + // now execute the block + let BlockExecutionOutput { state, receipts, .. } = + executor.executor(&mut db).execute((&block, U256::ZERO).into())?; + let bundle_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); - let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - let mut sum_blob_gas_used = 0; - for tx in &body.transactions { - if let Some(blob_tx) = tx.transaction.as_eip4844() { - sum_blob_gas_used += blob_tx.blob_gas(); - } - } - Some(sum_blob_gas_used) - } else { - None - }; + let Block { mut header, body, .. } = block.block; + let body = BlockBody { transactions: body, ommers, withdrawals }; trace!(target: "consensus::auto", ?bundle_state, ?header, ?body, "executed block, calculating state root and completing header"); - // fill in the rest of the fields - let header = self.complete_header( - header, - &bundle_state, - client, - gas_used, - blob_gas_used, - #[cfg(feature = "optimism")] - chain_spec.as_ref(), - )?; - + // calculate the state root + header.state_root = db.state_root(bundle_state.state())?; trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); // finally insert into storage diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 7e2a700ef..42f1268f3 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -2,7 +2,7 @@ use crate::{mode::MiningMode, Storage}; use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_engine_primitives::EngineTypes; -use reth_evm::ConfigureEvm; +use reth_evm::execute::BlockExecutorProvider; use reth_primitives::{ Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders, Withdrawals, }; @@ -22,7 +22,7 @@ use 
tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, warn}; /// A Future that listens for new ready transactions and puts new blocks into storage -pub struct MiningTask { +pub struct MiningTask { /// The configured chain spec chain_spec: Arc, /// The client used to interact with the state @@ -43,14 +43,14 @@ pub struct MiningTask>, - /// The type that defines how to configure the EVM. - evm_config: EvmConfig, + /// The type used for block execution + block_executor: Executor, } // === impl MiningTask === -impl - MiningTask +impl + MiningTask { /// Creates a new instance of the task #[allow(clippy::too_many_arguments)] @@ -62,7 +62,7 @@ impl storage: Storage, client: Client, pool: Pool, - evm_config: EvmConfig, + block_executor: Executor, ) -> Self { Self { chain_spec, @@ -75,7 +75,7 @@ impl canon_state_notification, queued: Default::default(), pipe_line_events: None, - evm_config, + block_executor, } } @@ -85,13 +85,13 @@ impl } } -impl Future for MiningTask +impl Future for MiningTask where Client: StateProviderFactory + CanonChainTracker + Clone + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, ::Transaction: IntoRecoveredTransaction, Engine: EngineTypes + 'static, - EvmConfig: ConfigureEvm + Clone + Unpin + Send + Sync + 'static, + Executor: BlockExecutorProvider, { type Output = (); @@ -121,7 +121,7 @@ where let pool = this.pool.clone(); let events = this.pipe_line_events.take(); let canon_state_notification = this.canon_state_notification.clone(); - let evm_config = this.evm_config.clone(); + let executor = this.block_executor.clone(); // Create the mining future that creates a block, notifies the engine that drives // the pipeline @@ -145,7 +145,7 @@ where withdrawals.clone(), &client, chain_spec, - &evm_config, + &executor, ) { Ok((new_header, bundle_state)) => { // clear all transactions from pool diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 4e35d06f0..8fb9d3ec3 100644 --- 
a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -51,6 +51,7 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-blockchain-tree = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-evm = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true reth-rpc.workspace = true reth-tracing.workspace = true diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 513987e75..27fc6b44c 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -14,9 +14,9 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_evm_ethereum::EthEvmConfig; +use reth_evm::{either::Either, test_utils::MockExecutorProvider}; +use reth_evm_ethereum::execute::EthExecutorProvider; use reth_interfaces::{ - executor::BlockExecutionError, p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, @@ -24,13 +24,10 @@ use reth_interfaces::{ use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{BlockNumber, ChainSpec, FinishedExExHeight, PruneModes, B256}; use reth_provider::{ - providers::BlockchainProvider, - test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, - BundleStateWithReceipts, ExecutorFactory, HeaderSyncMode, PrunableBlockExecutor, - StaticFileProviderFactory, + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, + BundleStateWithReceipts, HeaderSyncMode, StaticFileProviderFactory, }; use reth_prune::Pruner; -use reth_revm::EvmProcessorFactory; use reth_rpc_types::engine::{ 
CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -155,31 +152,6 @@ impl Default for TestExecutorConfig { } } -/// A type that represents one of two possible executor factories. -#[derive(Debug, Clone)] -pub enum EitherExecutorFactory { - /// The first factory variant - Left(A), - /// The second factory variant - Right(B), -} - -impl ExecutorFactory for EitherExecutorFactory -where - A: ExecutorFactory, - B: ExecutorFactory, -{ - fn with_state<'a, SP: reth_provider::StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box + 'a> { - match self { - EitherExecutorFactory::Left(a) => a.with_state::<'a, SP>(sp), - EitherExecutorFactory::Right(b) => b.with_state::<'a, SP>(sp), - } - } -} - /// The basic configuration for a `TestConsensusEngine`, without generics for the client or /// consensus engine. #[derive(Debug)] @@ -366,14 +338,13 @@ where // use either test executor or real executor let executor_factory = match self.base_config.executor_config { TestExecutorConfig::Test(results) => { - let executor_factory = TestExecutorFactory::default(); + let executor_factory = MockExecutorProvider::default(); executor_factory.extend(results); - EitherExecutorFactory::Left(executor_factory) + Either::Left(executor_factory) + } + TestExecutorConfig::Real => { + Either::Right(EthExecutorProvider::ethereum(self.base_config.chain_spec.clone())) } - TestExecutorConfig::Real => EitherExecutorFactory::Right(EvmProcessorFactory::new( - self.base_config.chain_spec.clone(), - EthEvmConfig::default(), - )), }; let static_file_producer = StaticFileProducer::new( diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index ea7cfab8c..6fa61e34f 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -16,7 +16,6 @@ reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true reth-interfaces.workspace = true -reth-provider.workspace = true # Ethereum revm-primitives.workspace = 
true diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 4239fe449..c3dd315f7 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -1,10 +1,10 @@ //! Ethereum block executor. -use crate::EthEvmConfig; +use crate::{verify::verify_receipts, EthEvmConfig}; use reth_evm::{ execute::{ - BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, - ExecutorProvider, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, }, ConfigureEvm, ConfigureEvmEnv, }; @@ -13,15 +13,13 @@ use reth_interfaces::{ provider::ProviderError, }; use reth_primitives::{ - BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, Receipts, - Withdrawals, U256, + BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, + Receipts, Withdrawals, MAINNET, U256, }; -use reth_provider::BundleStateWithReceipts; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - processor::verify_receipt, stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, @@ -35,35 +33,33 @@ use tracing::debug; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] -pub struct EthExecutorProvider { +pub struct EthExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, inspector: Option, - prune_modes: PruneModes, } -impl EthExecutorProvider { +impl EthExecutorProvider { /// Creates a new default ethereum executor provider. pub fn ethereum(chain_spec: Arc) -> Self { Self::new(chain_spec, Default::default()) } + + /// Returns a new provider for the mainnet. + pub fn mainnet() -> Self { + Self::ethereum(MAINNET.clone()) + } } impl EthExecutorProvider { /// Creates a new executor provider. 
pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } + Self { chain_spec, evm_config, inspector: None } } /// Configures an optional inspector stack for debugging. - pub fn with_inspector(mut self, inspector: InspectorStack) -> Self { - self.inspector = Some(inspector); - self - } - - /// Configures the prune modes for the executor. - pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; + pub fn with_inspector(mut self, inspector: Option) -> Self { + self.inspector = inspector; self } } @@ -86,7 +82,7 @@ where } } -impl ExecutorProvider for EthExecutorProvider +impl BlockExecutorProvider for EthExecutorProvider where EvmConfig: ConfigureEvm, EvmConfig: ConfigureEvmEnv, @@ -102,14 +98,14 @@ where self.eth_executor(db) } - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database, { let executor = self.eth_executor(db); EthBatchExecutor { executor, - batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + batch_record: BlockBatchRecord::new(prune_modes), stats: BlockExecutorStats::default(), } } @@ -318,9 +314,11 @@ where // transaction This was replaced with is_success flag. 
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = - verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter()) - { + if let Err(error) = verify_receipts( + block.header.receipts_root, + block.header.logs_bloom, + receipts.iter(), + ) { debug!(target: "evm", %error, ?receipts, "receipts verification failed"); return Err(error) }; @@ -382,8 +380,8 @@ where EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = EthBlockOutput; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; /// Executes the block and commits the state changes. @@ -394,13 +392,13 @@ where /// /// State changes are committed to the database. fn execute(mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; - // prepare the state for extraction - self.state.merge_transitions(BundleRetention::PlainState); + // NOTE: we need to merge keep the reverts for the bundle retention + self.state.merge_transitions(BundleRetention::Reverts); - Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, gas_used }) } } @@ -433,12 +431,12 @@ where EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = BundleStateWithReceipts; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = 
input; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; // prepare the state according to the prune mode @@ -448,18 +446,30 @@ where // store receipts in the set self.batch_record.save_receipts(receipts)?; - Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); + } + + Ok(()) } fn finalize(mut self) -> Self::Output { self.stats.log_debug(); - BundleStateWithReceipts::new( + BatchBlockExecutionOutput::new( self.executor.state.take_bundle(), self.batch_record.take_receipts(), self.batch_record.first_block().unwrap_or_default(), ) } + + fn set_tip(&mut self, tip: BlockNumber) { + self.batch_record.set_tip(tip); + } + + fn size_hint(&self) -> Option { + Some(self.executor.state.bundle_state.size_hint()) + } } #[cfg(test)] @@ -468,7 +478,7 @@ mod tests { use reth_primitives::{ bytes, constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, - keccak256, Account, Block, Bytes, ChainSpecBuilder, ForkCondition, B256, MAINNET, + keccak256, Account, Block, Bytes, ChainSpecBuilder, ForkCondition, B256, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, @@ -497,12 +507,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { - EthExecutorProvider { - chain_spec, - evm_config: Default::default(), - inspector: None, - prune_modes: Default::default(), - } + EthExecutorProvider { chain_spec, evm_config: Default::default(), inspector: None } } #[test] @@ -697,7 +702,8 @@ mod tests { let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); // 
attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); @@ -777,7 +783,8 @@ mod tests { let provider = executor_provider(chain_spec); // execute header - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); // Now execute a block with the fixed header, ensure that it does not fail executor diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index adcfd700d..88621a66a 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -16,6 +16,7 @@ use reth_primitives::{ }; use reth_revm::{Database, EvmBuilder}; pub mod execute; +pub mod verify; /// Ethereum-related EVM configuration. #[derive(Debug, Clone, Copy, Default)] diff --git a/crates/ethereum/evm/src/verify.rs b/crates/ethereum/evm/src/verify.rs new file mode 100644 index 000000000..6f552fe42 --- /dev/null +++ b/crates/ethereum/evm/src/verify.rs @@ -0,0 +1,53 @@ +//! Helpers for verifying the receipts. + +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{Bloom, GotExpected, Receipt, ReceiptWithBloom, B256}; + +/// Calculate the receipts root, and compare it against against the expected receipts root and logs +/// bloom. +pub fn verify_receipts<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); + + // Create header log bloom. 
+ let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. +pub fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), BlockExecutionError> { + if calculated_receipts_root != expected_receipts_root { + return Err(BlockValidationError::ReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + ) + .into()) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(BlockValidationError::BloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + ) + .into()) + } + + Ok(()) +} diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index f13c471a7..854dcd95a 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -17,3 +17,11 @@ revm-primitives.workspace = true revm.workspace = true reth-interfaces.workspace = true +futures-util.workspace = true +parking_lot = { workspace = true, optional = true } + +[dev-dependencies] +parking_lot.workspace = true + +[features] +test-utils = ["dep:parking_lot"] \ No newline at end of file diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs new file mode 100644 index 000000000..d1ae4ed78 --- /dev/null +++ b/crates/evm/src/either.rs @@ -0,0 +1,119 @@ +//! 
Helper type that represents one of two possible executor types + +use crate::execute::{ + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, +}; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use revm_primitives::db::Database; + +// re-export Either +pub use futures_util::future::Either; + +impl BlockExecutorProvider for Either +where + A: BlockExecutorProvider, + B: BlockExecutorProvider, +{ + type Executor> = Either, B::Executor>; + type BatchExecutor> = + Either, B::BatchExecutor>; + + fn executor(&self, db: DB) -> Self::Executor + where + DB: Database, + { + match self { + Either::Left(a) => Either::Left(a.executor(db)), + Either::Right(b) => Either::Right(b.executor(db)), + } + } + + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + where + DB: Database, + { + match self { + Either::Left(a) => Either::Left(a.batch_executor(db, prune_modes)), + Either::Right(b) => Either::Right(b.batch_executor(db, prune_modes)), + } + } +} + +impl Executor for Either +where + A: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >, + B: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >, + DB: Database, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute(self, input: Self::Input<'_>) -> Result { + match self { + Either::Left(a) => a.execute(input), + Either::Right(b) => b.execute(input), + } + } +} + +impl BatchExecutor for Either +where + A: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BatchBlockExecutionOutput, + Error = 
BlockExecutionError, + >, + B: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BatchBlockExecutionOutput, + Error = BlockExecutionError, + >, + DB: Database, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + match self { + Either::Left(a) => a.execute_one(input), + Either::Right(b) => b.execute_one(input), + } + } + + fn finalize(self) -> Self::Output { + match self { + Either::Left(a) => a.finalize(), + Either::Right(b) => b.finalize(), + } + } + + fn set_tip(&mut self, tip: BlockNumber) { + match self { + Either::Left(a) => a.set_tip(tip), + Either::Right(b) => b.set_tip(tip), + } + } + + fn size_hint(&self) -> Option { + match self { + Either::Left(a) => a.size_hint(), + Either::Right(b) => b.size_hint(), + } + } +} diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index b8c153602..7b3e58646 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,7 +1,7 @@ //! Traits for execution. -use reth_interfaces::provider::ProviderError; -use reth_primitives::U256; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, Receipts, U256}; use revm::db::BundleState; use revm_primitives::db::Database; @@ -21,8 +21,8 @@ pub trait Executor { fn execute(self, input: Self::Input<'_>) -> Result; } -/// An executor that can execute multiple blocks in a row and keep track of the state over the -/// entire batch. +/// A general purpose executor that can execute multiple inputs in sequence and keep track of the +/// state over the entire batch. pub trait BatchExecutor { /// The input type for the executor. 
type Input<'a>; @@ -32,17 +32,20 @@ pub trait BatchExecutor { type Error; /// Executes the next block in the batch and update the state internally. - fn execute_one(&mut self, input: Self::Input<'_>) -> Result; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; /// Finishes the batch and return the final state. fn finalize(self) -> Self::Output; -} -/// The output of an executed block in a batch. -#[derive(Debug, Clone, Copy)] -pub struct BatchBlockOutput { - /// The size hint of the batch's tracked state. - pub size_hint: Option, + /// Set the expected tip of the batch. + /// + /// This can be used to optimize state pruning during execution. + fn set_tip(&mut self, tip: BlockNumber); + + /// The size hint of the batch's tracked state size. + /// + /// This is used to optimize DB commits depending on the size of the state. + fn size_hint(&self) -> Option; } /// The output of an ethereum block. @@ -51,7 +54,7 @@ pub struct BatchBlockOutput { /// /// TODO(mattsse): combine with BundleStateWithReceipts #[derive(Debug)] -pub struct EthBlockOutput { +pub struct BlockExecutionOutput { /// The changed state of the block after execution. pub state: BundleState, /// All the receipts of the transactions in the block. @@ -60,42 +63,94 @@ pub struct EthBlockOutput { pub gas_used: u64, } +/// The output of a batch of ethereum blocks. +#[derive(Debug)] +pub struct BatchBlockExecutionOutput { + /// Bundle state with reverts. + pub bundle: BundleState, + /// The collection of receipts. + /// Outer vector stores receipts for each block sequentially. + /// The inner vector stores receipts ordered by transaction number. + /// + /// If receipt is None it means it is pruned. + pub receipts: Receipts, + /// First block of bundle state. + pub first_block: BlockNumber, +} + +impl BatchBlockExecutionOutput { + /// Create Bundle State. 
+ pub fn new(bundle: BundleState, receipts: Receipts, first_block: BlockNumber) -> Self { + Self { bundle, receipts, first_block } + } +} + /// A helper type for ethereum block inputs that consists of a block and the total difficulty. #[derive(Debug)] -pub struct EthBlockExecutionInput<'a, Block> { +pub struct BlockExecutionInput<'a, Block> { /// The block to execute. pub block: &'a Block, /// The total difficulty of the block. pub total_difficulty: U256, } -impl<'a, Block> EthBlockExecutionInput<'a, Block> { +impl<'a, Block> BlockExecutionInput<'a, Block> { /// Creates a new input. pub fn new(block: &'a Block, total_difficulty: U256) -> Self { Self { block, total_difficulty } } } -impl<'a, Block> From<(&'a Block, U256)> for EthBlockExecutionInput<'a, Block> { +impl<'a, Block> From<(&'a Block, U256)> for BlockExecutionInput<'a, Block> { fn from((block, total_difficulty): (&'a Block, U256)) -> Self { Self::new(block, total_difficulty) } } -/// A type that can create a new executor. -pub trait ExecutorProvider: Send + Sync + Clone { +/// A type that can create a new executor for block execution. +pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// An executor that can execute a single block given a database. - type Executor>: Executor; + /// + /// # Verification + /// + /// The on [Executor::execute] the executor is expected to validate the execution output of the + /// input, this includes: + /// - Cumulative gas used must match the input's gas used. + /// - Receipts must match the input's receipts root. + /// + /// It is not expected to validate the state trie root, this must be done by the caller using + /// the returned state. + type Executor>: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >; + /// An executor that can execute a batch of blocks given a database. 
+ type BatchExecutor>: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + // TODO: change to bundle state with receipts + Output = BatchBlockExecutionOutput, + Error = BlockExecutionError, + >; - type BatchExecutor>: BatchExecutor; /// Creates a new executor for single block execution. + /// + /// This is used to execute a single block and get the changed state. fn executor(&self, db: DB) -> Self::Executor where DB: Database; - /// Creates a new batch executor - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + /// Creates a new batch executor with the given database and pruning modes. + /// + /// Batch executor is used to execute multiple blocks in sequence and keep track of the state + /// during historical sync which involves executing multiple blocks in sequence. + /// + /// The pruning modes are used to determine which parts of the state should be kept during + /// execution. + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database; } @@ -103,13 +158,14 @@ pub trait ExecutorProvider: Send + Sync + Clone { #[cfg(test)] mod tests { use super::*; + use reth_primitives::Block; use revm::db::{CacheDB, EmptyDBTyped}; use std::marker::PhantomData; #[derive(Clone, Default)] struct TestExecutorProvider; - impl ExecutorProvider for TestExecutorProvider { + impl BlockExecutorProvider for TestExecutorProvider { type Executor> = TestExecutor; type BatchExecutor> = TestExecutor; @@ -120,7 +176,7 @@ mod tests { TestExecutor(PhantomData) } - fn batch_executor(&self, _db: DB) -> Self::BatchExecutor + fn batch_executor(&self, _db: DB, _prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database, { @@ -131,28 +187,35 @@ mod tests { struct TestExecutor(PhantomData); impl Executor for TestExecutor { - type Input<'a> = &'static str; - type Output = (); - type Error = String; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error 
= BlockExecutionError; fn execute(self, _input: Self::Input<'_>) -> Result { - Ok(()) + Err(BlockExecutionError::UnavailableForTest) } } impl BatchExecutor for TestExecutor { - type Input<'a> = &'static str; - type Output = (); - type Error = String; - - fn execute_one( - &mut self, - _input: Self::Input<'_>, - ) -> Result { - Ok(BatchBlockOutput { size_hint: None }) + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> { + Ok(()) + } + + fn finalize(self) -> Self::Output { + todo!() } - fn finalize(self) -> Self::Output {} + fn set_tip(&mut self, _tip: BlockNumber) { + todo!() + } + + fn size_hint(&self) -> Option { + None + } } #[test] @@ -160,6 +223,9 @@ mod tests { let provider = TestExecutorProvider; let db = CacheDB::>::default(); let executor = provider.executor(db); - executor.execute("test").unwrap(); + let block = + Block { header: Default::default(), body: vec![], ommers: vec![], withdrawals: None }; + let block = BlockWithSenders::new(block, Default::default()).unwrap(); + let _ = executor.execute(BlockExecutionInput::new(&block, U256::ZERO)); } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index d8e50b759..c69e33d65 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -12,8 +12,13 @@ use reth_primitives::{revm::env::fill_block_env, Address, ChainSpec, Header, Tra use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv}; +pub mod either; pub mod execute; +#[cfg(any(test, feature = "test-utils"))] +/// test helpers for mocking executor +pub mod test_utils; + /// Trait for configuring the EVM for executing full blocks. pub trait ConfigureEvm: ConfigureEvmEnv { /// Associated type for the default external context that should be configured for the EVM. 
diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs new file mode 100644 index 000000000..e0ee46917 --- /dev/null +++ b/crates/evm/src/test_utils.rs @@ -0,0 +1,80 @@ +//! Helpers for testing. + +use crate::execute::{ + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, +}; +use parking_lot::Mutex; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use revm_primitives::db::Database; +use std::sync::Arc; + +/// A [BlockExecutorProvider] that returns mocked execution results. +#[derive(Clone, Debug, Default)] +pub struct MockExecutorProvider { + exec_results: Arc>>, +} + +impl MockExecutorProvider { + /// Extend the mocked execution results + pub fn extend(&self, results: impl IntoIterator>) { + self.exec_results.lock().extend(results.into_iter().map(Into::into)); + } +} + +impl BlockExecutorProvider for MockExecutorProvider { + type Executor> = Self; + + type BatchExecutor> = Self; + + fn executor(&self, _: DB) -> Self::Executor + where + DB: Database, + { + self.clone() + } + + fn batch_executor(&self, _: DB, _: PruneModes) -> Self::BatchExecutor + where + DB: Database, + { + self.clone() + } +} + +impl Executor for MockExecutorProvider { + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute(self, _: Self::Input<'_>) -> Result { + let BatchBlockExecutionOutput { bundle, receipts, .. 
} = + self.exec_results.lock().pop().unwrap(); + Ok(BlockExecutionOutput { + state: bundle, + receipts: receipts.into_iter().flatten().flatten().collect(), + gas_used: 0, + }) + } +} + +impl BatchExecutor for MockExecutorProvider { + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { + Ok(()) + } + + fn finalize(self) -> Self::Output { + self.exec_results.lock().pop().unwrap() + } + + fn set_tip(&mut self, _: BlockNumber) {} + + fn size_hint(&self) -> Option { + None + } +} diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index 733047400..7cedb4977 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -53,11 +53,20 @@ impl FullNodeTypes for ExExContext { impl FullNodeComponents for ExExContext { type Pool = Node::Pool; type Evm = Node::Evm; + type Executor = Node::Executor; fn pool(&self) -> &Self::Pool { self.components.pool() } + fn evm_config(&self) -> &Self::Evm { + self.components.evm_config() + } + + fn block_executor(&self) -> &Self::Executor { + self.components.block_executor() + } + fn provider(&self) -> &Self::Provider { self.components.provider() } @@ -73,8 +82,4 @@ impl FullNodeComponents for ExExContext { fn task_executor(&self) -> &TaskExecutor { self.components.task_executor() } - - fn evm_config(&self) -> &Self::Evm { - self.components.evm_config() - } } diff --git a/crates/node-ethereum/src/evm.rs b/crates/node-ethereum/src/evm.rs index a5528d74a..d710d8d8d 100644 --- a/crates/node-ethereum/src/evm.rs +++ b/crates/node-ethereum/src/evm.rs @@ -1,4 +1,6 @@ //! 
Ethereum EVM support +#[doc(inline)] +pub use reth_evm_ethereum::execute::EthExecutorProvider; #[doc(inline)] pub use reth_evm_ethereum::EthEvmConfig; diff --git a/crates/node-ethereum/src/lib.rs b/crates/node-ethereum/src/lib.rs index cea2e7be0..44ec6836c 100644 --- a/crates/node-ethereum/src/lib.rs +++ b/crates/node-ethereum/src/lib.rs @@ -11,7 +11,7 @@ pub use reth_ethereum_engine_primitives::EthEngineTypes; pub mod evm; -pub use evm::EthEvmConfig; +pub use evm::{EthEvmConfig, EthExecutorProvider}; pub mod node; pub use node::EthereumNode; diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 815b949de..235130b42 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -2,6 +2,7 @@ use crate::{EthEngineTypes, EthEvmConfig}; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network::NetworkHandle; use reth_node_builder::{ components::{ @@ -76,9 +77,18 @@ where Node: FullNodeTypes, { type EVM = EthEvmConfig; + type Executor = EthExecutorProvider; - async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { - Ok(EthEvmConfig::default()) + async fn build_evm( + self, + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = EthEvmConfig::default(); + let executor = + EthExecutorProvider::new(chain_spec, evm_config).with_inspector(ctx.inspector_stack()); + + Ok((evm_config, executor)) } } diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 0a76f7504..355a7ecab 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -5,6 +5,7 @@ use reth_db::{ database::Database, database_metrics::{DatabaseMetadata, DatabaseMetrics}, }; +use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::FullProvider; 
@@ -88,12 +89,18 @@ pub trait FullNodeComponents: FullNodeTypes + 'static { /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. type Evm: ConfigureEvm; + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; /// Returns the node's evm config. fn evm_config(&self) -> &Self::Evm; + /// Returns the node's executor type. + fn block_executor(&self) -> &Self::Executor; + /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 136c27d7c..68c1d5f0c 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -17,6 +17,7 @@ reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true reth-exex.workspace = true +reth-evm.workspace = true reth-provider.workspace = true reth-revm.workspace = true reth-db.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index b6f0a191e..0457bbe3e 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -27,6 +27,7 @@ use reth_node_core::{ }; use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, ChainSpec}; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; +use reth_revm::stack::{InspectorStack, InspectorStackConfig}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; pub use states::*; @@ -460,6 +461,28 @@ impl BuilderContext { &self.config } + /// Returns an inspector stack if configured. + /// + /// This can be used to debug block execution. 
+ pub fn inspector_stack(&self) -> Option { + use reth_revm::stack::Hook; + let stack_config = InspectorStackConfig { + use_printer_tracer: self.config.debug.print_inspector, + hook: if let Some(hook_block) = self.config.debug.hook_block { + Hook::Block(hook_block) + } else if let Some(tx) = self.config.debug.hook_transaction { + Hook::Transaction(tx) + } else if self.config.debug.hook_all { + Hook::All + } else { + // no inspector + return None + }, + }; + + Some(InspectorStack::new(stack_config)) + } + /// Returns the data dir of the node. /// /// This gives access to all relevant files and directories of the node's datadir. diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 753978de1..103e4f174 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -98,6 +98,7 @@ impl> FullNodeTypes for NodeAdapter impl> FullNodeComponents for NodeAdapter { type Pool = C::Pool; type Evm = C::Evm; + type Executor = C::Executor; fn pool(&self) -> &Self::Pool { self.components.pool() @@ -107,6 +108,10 @@ impl> FullNodeComponents for NodeAdapter< self.components.evm_config() } + fn block_executor(&self) -> &Self::Executor { + self.components.block_executor() + } + fn provider(&self) -> &Self::Provider { &self.provider } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index d17cdc8ee..abeb2ca05 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,6 +7,7 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; +use reth_evm::execute::BlockExecutorProvider; use reth_transaction_pool::TransactionPool; use std::{future::Future, marker::PhantomData}; @@ -232,7 +233,7 @@ where PayloadB: PayloadServiceBuilder, ExecB: ExecutorBuilder, { - type Components = Components; + type Components = Components; async fn build_components( self, @@ -246,12 +247,12 @@ 
where _marker, } = self; - let evm_config = evm_builder.build_evm(context).await?; + let (evm_config, executor) = evm_builder.build_evm(context).await?; let pool = pool_builder.build_pool(context).await?; let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; - Ok(Components { transaction_pool: pool, evm_config, network, payload_builder }) + Ok(Components { transaction_pool: pool, evm_config, network, payload_builder, executor }) } } @@ -287,15 +288,16 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm, + Executor: BlockExecutorProvider, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 01684e9c2..891f8e01f 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,34 +1,41 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; +use reth_evm::execute::BlockExecutorProvider; use reth_node_api::ConfigureEvm; use std::future::Future; /// A type that knows how to build the executor types. pub trait ExecutorBuilder: Send { - /// The EVM config to build. + /// The EVM config to use. + /// + /// This provides the node with the necessary configuration to configure an EVM. type EVM: ConfigureEvm; - // TODO(mattsse): integrate `Executor` + + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; /// Creates the EVM config. 
fn build_evm( self, ctx: &BuilderContext, - ) -> impl Future> + Send; + ) -> impl Future> + Send; } -impl ExecutorBuilder for F +impl ExecutorBuilder for F where Node: FullNodeTypes, EVM: ConfigureEvm, + Executor: BlockExecutorProvider, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future> + Send, + Fut: Future> + Send, { type EVM = EVM; + type Executor = Executor; fn build_evm( self, ctx: &BuilderContext, - ) -> impl Future> { + ) -> impl Future> { self(ctx) } } diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 24d83da0d..ef5ea4995 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -13,6 +13,7 @@ pub use execute::*; pub use network::*; pub use payload::*; pub use pool::*; +use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::TransactionPool; @@ -35,12 +36,18 @@ pub trait NodeComponents: Clone + Send + Sync + 'stati /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. type Evm: ConfigureEvm; + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; /// Returns the node's evm config. fn evm_config(&self) -> &Self::Evm; + /// Returns the node's executor type. + fn block_executor(&self) -> &Self::Executor; + /// Returns the handle to the network fn network(&self) -> &NetworkHandle; @@ -52,25 +59,29 @@ pub trait NodeComponents: Clone + Send + Sync + 'stati /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. 
pub evm_config: EVM, + /// The node's executor type used to execute individual blocks and batches of blocks. + pub executor: Executor, /// The network implementation of the node. pub network: NetworkHandle, /// The handle to the payload builder service. pub payload_builder: PayloadBuilderHandle, } -impl NodeComponents for Components +impl NodeComponents for Components where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, EVM: ConfigureEvm, + Executor: BlockExecutorProvider, { type Pool = Pool; type Evm = EVM; + type Executor = Executor; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -80,6 +91,10 @@ where &self.evm_config } + fn block_executor(&self) -> &Self::Executor { + &self.executor + } + fn network(&self) -> &NetworkHandle { &self.network } @@ -89,16 +104,18 @@ where } } -impl Clone for Components +impl Clone for Components where Node: FullNodeTypes, Pool: TransactionPool, EVM: ConfigureEvm, + Executor: BlockExecutorProvider, { fn clone(&self) -> Self { Self { transaction_pool: self.transaction_pool.clone(), evm_config: self.evm_config.clone(), + executor: self.executor.clone(), network: self.network.clone(), payload_builder: self.payload_builder.clone(), } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 201965fa9..e8c5b2967 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -30,7 +30,6 @@ use reth_node_core::{ use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::format_ether; use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; -use reth_revm::EvmProcessorFactory; use reth_rpc_engine_api::EngineApi; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; @@ -157,7 +156,7 @@ where let tree_externals = TreeExternals::new( ctx.provider_factory().clone(), consensus.clone(), - EvmProcessorFactory::new(ctx.chain_spec(), components.evm_config().clone()), + 
components.block_executor().clone(), ); let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())? .with_sync_metrics_tx(sync_metrics_tx.clone()) @@ -303,7 +302,7 @@ where consensus_engine_tx.clone(), canon_state_notification_sender, mining_mode, - node_adapter.components.evm_config().clone(), + node_adapter.components.block_executor().clone(), ) .build(); @@ -318,7 +317,7 @@ where ctx.prune_config(), max_block, static_file_producer, - node_adapter.components.evm_config().clone(), + node_adapter.components.block_executor().clone(), pipeline_exex_handle, ) .await?; @@ -341,7 +340,7 @@ where ctx.prune_config(), max_block, static_file_producer, - node_adapter.components.evm_config().clone(), + node_adapter.components.block_executor().clone(), pipeline_exex_handle, ) .await?; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 03bf45893..8033ab1c6 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -1,6 +1,5 @@ //! Helpers for setting up parts of the node. 
-use crate::ConfigureEvm; use reth_config::{config::StageConfig, PruneConfig}; use reth_consensus::Consensus; use reth_db::database::Database; @@ -8,6 +7,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_interfaces::p2p::{ bodies::{client::BodiesClient, downloader::BodyDownloader}, @@ -18,7 +18,6 @@ use reth_node_core::{ primitives::{BlockNumber, B256}, }; use reth_provider::{HeaderSyncMode, ProviderFactory}; -use reth_revm::stack::{Hook, InspectorStackConfig}; use reth_stages::{ prelude::DefaultStages, stages::{ @@ -36,7 +35,7 @@ use tokio::sync::watch; /// Constructs a [Pipeline] that's wired to the network #[allow(clippy::too_many_arguments)] -pub async fn build_networked_pipeline( +pub async fn build_networked_pipeline( node_config: &NodeConfig, config: &StageConfig, client: Client, @@ -47,13 +46,13 @@ pub async fn build_networked_pipeline( prune_config: Option, max_block: Option, static_file_producer: StaticFileProducer, - evm_config: EvmConfig, + executor: Executor, exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where DB: Database + Unpin + Clone + 'static, Client: HeadersClient + BodiesClient + Clone + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, + Executor: BlockExecutorProvider, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -75,7 +74,7 @@ where metrics_tx, prune_config, static_file_producer, - evm_config, + executor, exex_manager_handle, ) .await?; @@ -85,7 +84,7 @@ where /// Builds the [Pipeline] with the given [ProviderFactory] and downloaders. 
#[allow(clippy::too_many_arguments)] -pub async fn build_pipeline( +pub async fn build_pipeline( node_config: &NodeConfig, provider_factory: ProviderFactory, stage_config: &StageConfig, @@ -96,14 +95,14 @@ pub async fn build_pipeline( metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, static_file_producer: StaticFileProducer, - evm_config: EvmConfig, + executor: Executor, exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where DB: Database + Clone + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, + Executor: BlockExecutorProvider, { let mut builder = Pipeline::builder(); @@ -113,22 +112,6 @@ where } let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = reth_revm::EvmProcessorFactory::new(node_config.chain.clone(), evm_config); - - let stack_config = InspectorStackConfig { - use_printer_tracer: node_config.debug.print_inspector, - hook: if let Some(hook_block) = node_config.debug.hook_block { - Hook::Block(hook_block) - } else if let Some(tx) = node_config.debug.hook_transaction { - Hook::Transaction(tx) - } else if node_config.debug.hook_all { - Hook::All - } else { - Hook::None - }, - }; - - let factory = factory.with_stack_config(stack_config); let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default(); @@ -147,7 +130,7 @@ where Arc::clone(&consensus), header_downloader, body_downloader, - factory.clone(), + executor.clone(), stage_config.etl.clone(), ) .set(SenderRecoveryStage { @@ -155,7 +138,7 @@ where }) .set( ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: stage_config.execution.max_blocks, max_changes: stage_config.execution.max_changes, diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index fbffa1245..8e5afc5ef 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -19,6 +19,7 @@ reth-interfaces.workspace = true reth-provider.workspace = true # 
Optimism +revm.workspace = true revm-primitives.workspace = true # misc diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 0a5e05780..2ea32782c 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,10 +1,10 @@ //! Optimism block executor. -use crate::OptimismEvmConfig; +use crate::{l1::ensure_create2_deployer, verify::verify_receipts, OptimismEvmConfig}; use reth_evm::{ execute::{ - BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, - ExecutorProvider, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, }, ConfigureEvm, ConfigureEvmEnv, }; @@ -13,16 +13,12 @@ use reth_interfaces::{ provider::ProviderError, }; use reth_primitives::{ - proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, Bytes, ChainSpec, - GotExpected, Hardfork, Header, PruneModes, Receipt, ReceiptWithBloom, Receipts, TxType, - Withdrawals, B256, U256, + BlockNumber, BlockWithSenders, Bytes, ChainSpec, GotExpected, Hardfork, Header, PruneModes, + Receipt, Receipts, TxType, Withdrawals, U256, }; -use reth_provider::BundleStateWithReceipts; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - optimism::ensure_create2_deployer, - processor::compare_receipts_root_and_logs_bloom, stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, @@ -36,14 +32,13 @@ use tracing::{debug, trace}; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] -pub struct OpExecutorProvider { +pub struct OpExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, inspector: Option, - prune_modes: PruneModes, } -impl OpExecutorProvider { +impl OpExecutorProvider { /// Creates a new default optimism executor provider. 
pub fn optimism(chain_spec: Arc) -> Self { Self::new(chain_spec, Default::default()) @@ -53,7 +48,7 @@ impl OpExecutorProvider { impl OpExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } + Self { chain_spec, evm_config, inspector: None } } /// Configures an optional inspector stack for debugging. @@ -61,12 +56,6 @@ impl OpExecutorProvider { self.inspector = inspector; self } - - /// Configures the prune modes for the executor. - pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; - self - } } impl OpExecutorProvider @@ -87,7 +76,7 @@ where } } -impl ExecutorProvider for OpExecutorProvider +impl BlockExecutorProvider for OpExecutorProvider where EvmConfig: ConfigureEvm, EvmConfig: ConfigureEvmEnv, @@ -102,14 +91,14 @@ where self.op_executor(db) } - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database, { let executor = self.op_executor(db); OpBatchExecutor { executor, - batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + batch_record: BlockBatchRecord::new(prune_modes), stats: BlockExecutorStats::default(), } } @@ -370,7 +359,7 @@ where // transaction This was replaced with is_success flag. 
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipt_optimism( + if let Err(error) = verify_receipts( block.header.receipts_root, block.header.logs_bloom, receipts.iter(), @@ -424,8 +413,8 @@ where EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = EthBlockOutput; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; /// Executes the block and commits the state changes. @@ -436,13 +425,13 @@ where /// /// State changes are committed to the database. fn execute(mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; - // prepare the state for extraction - self.state.merge_transitions(BundleRetention::PlainState); + // NOTE: we need to merge keep the reverts for the bundle retention + self.state.merge_transitions(BundleRetention::Reverts); - Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, gas_used }) } } @@ -478,12 +467,12 @@ where EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = BundleStateWithReceipts; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, 
_gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; // prepare the state according to the prune mode @@ -493,45 +482,30 @@ where // store receipts in the set self.batch_record.save_receipts(receipts)?; - Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); + } + + Ok(()) } fn finalize(mut self) -> Self::Output { - // TODO: track stats self.stats.log_debug(); - BundleStateWithReceipts::new( + BatchBlockExecutionOutput::new( self.executor.state.take_bundle(), self.batch_record.take_receipts(), self.batch_record.first_block().unwrap_or_default(), ) } -} -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipt_optimism<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) + fn set_tip(&mut self, tip: BlockNumber) { + self.batch_record.set_tip(tip); + } + + fn size_hint(&self) -> Option { + Some(self.executor.state.bundle_state.size_hint()) + } } #[cfg(test)] @@ -574,12 +548,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { - chain_spec, - evm_config: Default::default(), - inspector: None, - prune_modes: Default::default(), - } + OpExecutorProvider { chain_spec, evm_config: Default::default(), inspector: None } } #[test] @@ -626,7 +595,8 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); @@ -706,7 +676,8 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); diff --git a/crates/revm/src/optimism/mod.rs b/crates/optimism/evm/src/l1.rs similarity index 97% rename from crates/revm/src/optimism/mod.rs rename to crates/optimism/evm/src/l1.rs index 0dc6c6877..896cbc36a 100644 --- a/crates/revm/src/optimism/mod.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,3 +1,5 @@ +//! 
Optimism-specific implementation and utilities for the executor + use reth_interfaces::{ executor::{self as reth_executor, BlockExecutionError}, RethError, @@ -10,14 +12,13 @@ use revm::{ use std::sync::Arc; use tracing::trace; -/// Optimism-specific processor implementation for the `EVMProcessor` -pub mod processor; - /// The address of the create2 deployer const CREATE_2_DEPLOYER_ADDR: Address = address!("13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2"); + /// The codehash of the create2 deployer contract. const CREATE_2_DEPLOYER_CODEHASH: B256 = b256!("b0550b5b431e30d38000efb7107aaa0ade03d48a7198a140edda9d27134468b2"); + /// The raw bytecode of the create2 deployer contract. const CREATE_2_DEPLOYER_BYTECODE: [u8; 1584] = hex!("6080604052600436106100435760003560e01c8063076c37b21461004f578063481286e61461007157806356299481146100ba57806366cfa057146100da57600080fd5b3661004a57005b600080fd5b34801561005b57600080fd5b5061006f61006a366004610327565b6100fa565b005b34801561007d57600080fd5b5061009161008c366004610327565b61014a565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100c657600080fd5b506100916100d5366004610349565b61015d565b3480156100e657600080fd5b5061006f6100f53660046103ca565b610172565b61014582826040518060200161010f9061031a565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604052610183565b505050565b600061015683836102e7565b9392505050565b600061016a8484846102f0565b949350505050565b61017d838383610183565b50505050565b6000834710156101f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f437265617465323a20696e73756666696369656e742062616c616e636500000060448201526064015b60405180910390fd5b815160000361025f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f437265617465323a2062797465636f6465206c656e677468206973207a65726f60448201526064016101eb565b8282516020840186f5905073ffffffffffffffffffffffffff
ffffffffffffff8116610156576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f437265617465323a204661696c6564206f6e206465706c6f790000000000000060448201526064016101eb565b60006101568383305b6000604051836040820152846020820152828152600b8101905060ff815360559020949350505050565b61014e806104ad83390190565b6000806040838503121561033a57600080fd5b50508035926020909101359150565b60008060006060848603121561035e57600080fd5b8335925060208401359150604084013573ffffffffffffffffffffffffffffffffffffffff8116811461039057600080fd5b809150509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000606084860312156103df57600080fd5b8335925060208401359150604084013567ffffffffffffffff8082111561040557600080fd5b818601915086601f83011261041957600080fd5b81358181111561042b5761042b61039b565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156104715761047161039b565b8160405282815289602084870101111561048a57600080fd5b826020860160208301376000602084830101528095505050505050925092509256fe608060405234801561001057600080fd5b5061012e806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063249cb3fa14602d575b600080fd5b603c603836600460b1565b604e565b60405190815260200160405180910390f35b60008281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915281205460ff16608857600060aa565b7fa2ef4600d742022d532d4747cb3547474667d6f13804902513b2ec01c848f4b45b9392505050565b6000806040838503121560c357600080fd5b82359150602083013573ffffffffffffffffffffffffffffffffffffffff8116811460ed57600080fd5b80915050925092905056fea26469706673582212205ffd4e6cede7d06a5daf93d48d0541fc68189eeb16608c1999a82063b666eb1164736f6c63430008130033a2646970667358221220fdc4a0fe96e3b21c108ca155438d37c9143fb01278a3c1d274948bad89c564ba64736f6c63430008130033"); @@ -75,21 +76,21 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result( + 
expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, + chain_spec: &ChainSpec, + timestamp: u64, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = + calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); + + // Create header log bloom. + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. +pub fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), BlockExecutionError> { + if calculated_receipts_root != expected_receipts_root { + return Err(BlockValidationError::ReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + ) + .into()) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(BlockValidationError::BloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + ) + .into()) + } + + Ok(()) +} diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 7e7d54703..a2cbc287c 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -7,7 +7,7 @@ use crate::{ }; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_evm::ConfigureEvm; -use reth_evm_optimism::OptimismEvmConfig; +use reth_evm_optimism::{OpExecutorProvider, OptimismEvmConfig}; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_builder::{ 
components::{ @@ -97,9 +97,18 @@ where Node: FullNodeTypes, { type EVM = OptimismEvmConfig; + type Executor = OpExecutorProvider; - async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { - Ok(OptimismEvmConfig::default()) + async fn build_evm( + self, + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = OptimismEvmConfig::default(); + let executor = + OpExecutorProvider::new(chain_spec, evm_config).with_inspector(ctx.inspector_stack()); + + Ok((evm_config, executor)) } } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 7ee1bb9ec..db6a6266e 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -1,8 +1,9 @@ //! OP transaction pool types use parking_lot::RwLock; +use reth_evm_optimism::RethL1BlockInfo; use reth_primitives::{Block, ChainSpec, GotExpected, InvalidTransactionError, SealedBlock}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; -use reth_revm::{optimism::RethL1BlockInfo, L1BlockInfo}; +use reth_revm::L1BlockInfo; use reth_transaction_pool::{ CoinbaseTipOrdering, EthPoolTransaction, EthPooledTransaction, EthTransactionValidator, Pool, TransactionOrigin, TransactionValidationOutcome, TransactionValidationTaskExecutor, @@ -75,7 +76,7 @@ where /// Update the L1 block info. 
fn update_l1_block_info(&self, block: &Block) { self.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); - if let Ok(cost_addition) = reth_revm::optimism::extract_l1_info(block) { + if let Ok(cost_addition) = reth_evm_optimism::extract_l1_info(block) { *self.block_info.l1_block_info.write() = cost_addition; } } diff --git a/crates/payload/optimism/Cargo.toml b/crates/payload/optimism/Cargo.toml index ebc776e74..c58d0ecb5 100644 --- a/crates/payload/optimism/Cargo.toml +++ b/crates/payload/optimism/Cargo.toml @@ -21,6 +21,7 @@ reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true reth-evm.workspace = true +reth-evm-optimism.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true @@ -39,4 +40,5 @@ optimism = [ "reth-revm/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", + "reth-evm-optimism/optimism", ] \ No newline at end of file diff --git a/crates/payload/optimism/src/builder.rs b/crates/payload/optimism/src/builder.rs index 8e8bfb8f0..2794ad968 100644 --- a/crates/payload/optimism/src/builder.rs +++ b/crates/payload/optimism/src/builder.rs @@ -303,7 +303,7 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. 
- reth_revm::optimism::ensure_create2_deployer( + reth_evm_optimism::ensure_create2_deployer( chain_spec.clone(), attributes.payload_attributes.timestamp, &mut db, diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 5c62f324e..151d53a97 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -17,7 +17,7 @@ reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true reth-consensus-common.workspace = true -reth-evm.workspace = true +reth-evm = { workspace = true, optional = true } reth-trie = { workspace = true, optional = true } # revm @@ -28,10 +28,11 @@ revm-inspectors.workspace = true tracing.workspace = true [dev-dependencies] +reth-evm.workspace = true reth-trie.workspace = true [features] -test-utils = ["dep:reth-trie"] +test-utils = ["dep:reth-trie", "dep:reth-evm"] optimism = [ "revm/optimism", "reth-primitives/optimism", diff --git a/crates/revm/src/factory.rs b/crates/revm/src/factory.rs deleted file mode 100644 index fdaae52c0..000000000 --- a/crates/revm/src/factory.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::{ - database::StateProviderDatabase, - processor::EVMProcessor, - stack::{InspectorStack, InspectorStackConfig}, -}; -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::ChainSpec; -use reth_provider::{ExecutorFactory, PrunableBlockExecutor, StateProvider}; -use std::sync::Arc; - -/// Factory for creating [EVMProcessor]. -#[derive(Clone, Debug)] -pub struct EvmProcessorFactory { - chain_spec: Arc, - stack: Option, - /// Type that defines how the produced EVM should be configured. - evm_config: EvmConfig, -} - -impl EvmProcessorFactory { - /// Create new factory - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, stack: None, evm_config } - } - - /// Sets the inspector stack for all generated executors. 
- pub fn with_stack(mut self, stack: InspectorStack) -> Self { - self.stack = Some(stack); - self - } - - /// Sets the inspector stack for all generated executors using the provided config. - pub fn with_stack_config(mut self, config: InspectorStackConfig) -> Self { - self.stack = Some(InspectorStack::new(config)); - self - } -} - -impl ExecutorFactory for EvmProcessorFactory -where - EvmConfig: ConfigureEvm + Send + Sync + Clone + 'static, -{ - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box + 'a> { - let database_state = StateProviderDatabase::new(sp); - let mut evm = - EVMProcessor::new_with_db(self.chain_spec.clone(), database_state, &self.evm_config); - if let Some(stack) = &self.stack { - evm.set_stack(stack.clone()); - } - Box::new(evm) - } -} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index f4ed01ada..d8c5761d0 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -11,20 +11,11 @@ /// Contains glue code for integrating reth database into revm's [Database]. pub mod database; -/// revm implementation of reth block and transaction executors. -mod factory; - pub mod batch; -/// new revm account state executor -pub mod processor; - /// State changes that are not related to transactions. pub mod state_change; -/// revm executor factory. -pub use factory::EvmProcessorFactory; - /// Ethereum DAO hardfork state change data. pub mod eth_dao_fork; @@ -33,10 +24,6 @@ pub mod eth_dao_fork; /// used in the main Reth executor. 
pub mod stack; -/// Optimism-specific implementation and utilities for the executor -#[cfg(feature = "optimism")] -pub mod optimism; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs deleted file mode 100644 index 9fe51d059..000000000 --- a/crates/revm/src/optimism/processor.rs +++ /dev/null @@ -1,401 +0,0 @@ -use crate::processor::{compare_receipts_root_and_logs_bloom, EVMProcessor}; -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::{ - BlockExecutionError, BlockValidationError, OptimismBlockExecutionError, -}; -use reth_primitives::{ - proofs::calculate_receipt_root_optimism, revm_primitives::ResultAndState, BlockWithSenders, - Bloom, ChainSpec, Hardfork, Receipt, ReceiptWithBloom, TxType, B256, U256, -}; -use reth_provider::{BlockExecutor, BundleStateWithReceipts}; -use revm::DatabaseCommit; -use std::time::Instant; -use tracing::{debug, trace}; - -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipt_optimism<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -impl<'a, EvmConfig> BlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - // execute block - let receipts = self.execute_inner(block, total_difficulty)?; - - // TODO Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is needed for state root got calculated in every - // transaction This was replaced with is_success flag. - // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) { - let time = Instant::now(); - if let Err(error) = verify_receipt_optimism( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - self.chain_spec.as_ref(), - block.timestamp, - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - self.stats.receipt_root_duration += time.elapsed(); - } - - self.batch_record.save_receipts(receipts) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - - // perf: do not execute empty blocks - if block.body.is_empty() { - return Ok((Vec::new(), 0)) - } - - let is_regolith = - self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); - - // Ensure that the create2deployer is force-deployed at the canyon transition. 
Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - super::ensure_create2_deployer(self.chain_spec().clone(), block.timestamp, self.db_mut()) - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::ForceCreate2DeployerFail, - ) - })?; - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.len()); - for (sender, transaction) in block.transactions_with_sender() { - let time = Instant::now(); - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_system_transaction()) - { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - // An optimism block should never contain blob transactions. - if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::BlobTransactionRejected, - )) - } - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. 
- let depositor = (is_regolith && transaction.is_deposit()) - .then(|| { - self.db_mut() - .load_cache_account(*sender) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::AccountLoadFailed(*sender), - ) - })?; - - // Execute transaction. - let ResultAndState { result, state } = self.transact(transaction, *sender)?; - trace!( - target: "evm", - ?transaction, ?result, ?state, - "Executed transaction" - ); - self.stats.execution_duration += time.elapsed(); - let time = Instant::now(); - - self.db_mut().commit(state); - - self.stats.apply_state_duration += time.elapsed(); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs().into_iter().map(Into::into).collect(), - #[cfg(feature = "optimism")] - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process ensures - // this is only set for post-Canyon deposit transactions. 
- #[cfg(feature = "optimism")] - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec() - .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) - .then_some(1), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - BundleStateWithReceipts::new( - self.evm.context.evm.db.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - ) - } - - fn size_hint(&self) -> Option { - Some(self.evm.context.evm.db.bundle_size_hint()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - database::StateProviderDatabase, - test_utils::{StateProviderTest, TestEvmConfig}, - }; - use reth_primitives::{ - b256, Account, Address, Block, ChainSpecBuilder, Header, Signature, StorageKey, - StorageValue, Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, - }; - use revm::L1_BLOCK_CONTRACT; - use std::{collections::HashMap, str::FromStr, sync::Arc}; - - fn create_op_state_provider() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let l1_block_contract_account = - Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; - - let mut l1_block_storage = HashMap::new(); - // base fee - l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); - // l1 fee overhead - l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); - // l1 fee scalar - l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); - // l1 free scalars post ecotone - l1_block_storage.insert( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .unwrap(), - ); - - db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); - - db - } - - fn create_op_evm_processor<'a>( - chain_spec: Arc, - db: StateProviderTest, - ) -> EVMProcessor<'a, 
TestEvmConfig> { - static CONFIG: std::sync::OnceLock = std::sync::OnceLock::new(); - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - CONFIG.get_or_init(TestEvmConfig::default), - ); - executor.evm.context.evm.db.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - executor - } - - #[test] - fn op_deposit_fields_pre_canyon() { - let header = Header { - timestamp: 1, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).regolith_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: addr.into(), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: 21_000, - ..Default::default() - }), - Signature::default(), - ); - - let mut executor = create_op_evm_processor(chain_spec, db); - - // Attempt to execute a block with one deposit and one non-deposit transaction - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .unwrap(); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is not present in pre canyon transactions - 
assert!(deposit_receipt.deposit_receipt_version.is_none()); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } - - #[test] - fn op_deposit_fields_post_canyon() { - // ensure_create2_deployer will fail if timestamp is set to less then 2 - let header = Header { - timestamp: 2, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).canyon_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: addr.into(), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: 21_000, - ..Default::default() - }), - Signature::optimism_deposit_tx_signature(), - ); - - let mut executor = create_op_evm_processor(chain_spec, db); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .expect("Executing a block while canyon is active should not fail"); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = 
executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is set to 1 for post canyon deposit transactions - assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } -} diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs deleted file mode 100644 index 487cec528..000000000 --- a/crates/revm/src/processor.rs +++ /dev/null @@ -1,865 +0,0 @@ -#[cfg(not(feature = "optimism"))] -use revm::DatabaseCommit; -use revm::{ - db::StateDBBox, - inspector_handle_register, - interpreter::Host, - primitives::{CfgEnvWithHandlerCfg, ResultAndState}, - Evm, State, -}; -use std::{marker::PhantomData, sync::Arc, time::Instant}; -#[cfg(not(feature = "optimism"))] -use tracing::{debug, trace}; - -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -#[cfg(feature = "optimism")] -use reth_primitives::revm::env::fill_op_tx_env; -#[cfg(not(feature = "optimism"))] -use reth_primitives::revm::env::fill_tx_env; -use reth_primitives::{ - Address, Block, BlockNumber, BlockWithSenders, Bloom, ChainSpec, GotExpected, Hardfork, Header, - PruneModes, Receipt, ReceiptWithBloom, Receipts, TransactionSigned, Withdrawals, B256, U256, -}; -#[cfg(not(feature = "optimism"))] -use reth_provider::BundleStateWithReceipts; -use reth_provider::{BlockExecutor, ProviderError, PrunableBlockExecutor, StateProvider}; - -use crate::{ - batch::{BlockBatchRecord, BlockExecutorStats}, - database::StateProviderDatabase, - eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - stack::{InspectorStack, InspectorStackConfig}, - state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, -}; - -/// EVMProcessor is a block executor that uses revm to execute blocks or multiple blocks. 
-/// -/// Output is obtained by calling `take_output_state` function. -/// -/// It is capable of pruning the data that will be written to the database -/// and implemented [PrunableBlockExecutor] traits. -/// -/// It implemented the [BlockExecutor] that give it the ability to take block -/// apply pre state (Cancun system contract call), execute transaction and apply -/// state change and then apply post execution changes (block reward, withdrawals, irregular DAO -/// hardfork state change). And if `execute_and_verify_receipt` is called it will verify the -/// receipt. -/// -/// InspectorStack are used for optional inspecting execution. And it contains -/// various duration of parts of execution. -#[allow(missing_debug_implementations)] -pub struct EVMProcessor<'a, EvmConfig> { - /// The configured chain-spec - pub(crate) chain_spec: Arc, - /// revm instance that contains database and env environment. - pub(crate) evm: Evm<'a, InspectorStack, StateDBBox<'a, ProviderError>>, - /// Keeps track of the recorded receipts and pruning configuration. - pub(crate) batch_record: BlockBatchRecord, - /// Execution stats - pub(crate) stats: BlockExecutorStats, - /// The type that is able to configure the EVM environment. - _phantom: PhantomData, -} - -impl<'a, EvmConfig> EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - /// Return chain spec. - pub fn chain_spec(&self) -> &Arc { - &self.chain_spec - } - - /// Creates a new executor from the given chain spec and database. - pub fn new_with_db( - chain_spec: Arc, - db: StateProviderDatabase, - evm_config: &'a EvmConfig, - ) -> Self { - let state = State::builder() - .with_database_boxed(Box::new(db)) - .with_bundle_update() - .without_state_clear() - .build(); - EVMProcessor::new_with_state(chain_spec, state, evm_config) - } - - /// Create a new EVM processor with the given revm state. 
- pub fn new_with_state( - chain_spec: Arc, - revm_state: StateDBBox<'a, ProviderError>, - evm_config: &'a EvmConfig, - ) -> Self { - let stack = InspectorStack::new(InspectorStackConfig::default()); - let evm = evm_config.evm_with_inspector(revm_state, stack); - EVMProcessor { - chain_spec, - evm, - batch_record: BlockBatchRecord::default(), - stats: BlockExecutorStats::default(), - _phantom: PhantomData, - } - } - - /// Configures the executor with the given inspectors. - pub fn set_stack(&mut self, stack: InspectorStack) { - self.evm.context.external = stack; - } - - /// Configure the executor with the given block. - pub fn set_first_block(&mut self, num: BlockNumber) { - self.batch_record.set_first_block(num); - } - - /// Saves the receipts to the batch record. - pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { - self.batch_record.save_receipts(receipts) - } - - /// Returns the recorded receipts. - pub fn receipts(&self) -> &Receipts { - self.batch_record.receipts() - } - - /// Returns a reference to the database - pub fn db_mut(&mut self) -> &mut StateDBBox<'a, ProviderError> { - &mut self.evm.context.evm.db - } - - /// Initializes the config and block env. - pub(crate) fn init_env(&mut self, header: &Header, total_difficulty: U256) { - // Set state clear flag. - let state_clear_flag = - self.chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(header.number); - - self.db_mut().set_state_clear_flag(state_clear_flag); - - let mut cfg = - CfgEnvWithHandlerCfg::new_with_spec_id(self.evm.cfg().clone(), self.evm.spec_id()); - EvmConfig::fill_cfg_and_block_env( - &mut cfg, - self.evm.block_mut(), - &self.chain_spec, - header, - total_difficulty, - ); - *self.evm.cfg_mut() = cfg.cfg_env; - - // This will update the spec in case it changed - self.evm.modify_spec_id(cfg.handler_cfg.spec_id); - } - - /// Applies the pre-block call to the EIP-4788 beacon block root contract. 
- /// - /// If cancun is not activated or the block is the genesis block, then this is a no-op, and no - /// state changes are made. - fn apply_beacon_root_contract_call( - &mut self, - block: &Block, - ) -> Result<(), BlockExecutionError> { - apply_beacon_root_contract_call( - &self.chain_spec, - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut self.evm, - )?; - Ok(()) - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn apply_post_execution_state_change( - &mut self, - block: &Block, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - let mut balance_increments = post_block_balance_increments( - &self.chain_spec, - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); - - // Irregular state change at Ethereum DAO hardfork - if self.chain_spec.fork(Hardfork::Dao).transitions_at_block(block.number) { - // drain balances from hardcoded addresses. - let drained_balance: u128 = self - .db_mut() - .drain_balances(DAO_HARDKFORK_ACCOUNTS) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)? - .into_iter() - .sum(); - - // return balance to DAO beneficiary. - *balance_increments.entry(DAO_HARDFORK_BENEFICIARY).or_default() += drained_balance; - } - // increment balances - self.db_mut() - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(()) - } - - /// Runs a single transaction in the configured environment and proceeds - /// to return the result and state diff (without applying it). - /// - /// Assumes the rest of the block environment has been filled via `init_block_env`. - pub fn transact( - &mut self, - transaction: &TransactionSigned, - sender: Address, - ) -> Result { - // Fill revm structure. 
- #[cfg(not(feature = "optimism"))] - fill_tx_env(self.evm.tx_mut(), transaction, sender); - - #[cfg(feature = "optimism")] - { - let mut envelope_buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut envelope_buf); - fill_op_tx_env(self.evm.tx_mut(), transaction, sender, envelope_buf.into()); - } - - let hash = transaction.hash_ref(); - let should_inspect = self.evm.context.external.should_inspect(self.evm.env(), hash); - let out = if should_inspect { - // push inspector handle register. - self.evm.handler.append_handler_register_plain(inspector_handle_register); - let output = self.evm.transact(); - tracing::trace!( - target: "evm", - %hash, ?output, ?transaction, env = ?self.evm.context.evm.env, - "Executed transaction" - ); - // pop last handle register - self.evm.handler.pop_handle_register(); - output - } else { - // Main execution without needing the hash - self.evm.transact() - }; - - out.map_err(move |e| { - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { hash: transaction.recalculate_hash(), error: e.into() } - .into() - }) - } - - /// Execute the block, verify gas usage and apply post-block state changes. - pub(crate) fn execute_inner( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result, BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - self.apply_beacon_root_contract_call(block)?; - let (receipts, cumulative_gas_used) = self.execute_transactions(block, total_difficulty)?; - - // Check if gas used matches the value set in header. 
- if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()) - } - let time = Instant::now(); - self.apply_post_execution_state_change(block, total_difficulty)?; - self.stats.apply_post_execution_state_changes_duration += time.elapsed(); - - let time = Instant::now(); - let retention = self.batch_record.bundle_retention(block.number); - self.db_mut().merge_transitions(retention); - self.stats.merge_transitions_duration += time.elapsed(); - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - - Ok(receipts) - } -} - -/// Default Ethereum implementation of the [BlockExecutor] trait for the [EVMProcessor]. -#[cfg(not(feature = "optimism"))] -impl<'a, EvmConfig> BlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - // execute block - let receipts = self.execute_inner(block, total_difficulty)?; - - // TODO Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is needed for state root got calculated in every - // transaction This was replaced with is_success flag. 
- // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) { - let time = Instant::now(); - if let Err(error) = - verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter()) - { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - self.stats.receipt_root_duration += time.elapsed(); - } - - self.batch_record.save_receipts(receipts)?; - Ok(()) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - - // perf: do not execute empty blocks - if block.body.is_empty() { - return Ok((Vec::new(), 0)) - } - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.len()); - for (sender, transaction) in block.transactions_with_sender() { - let time = Instant::now(); - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - // Execute transaction. - let ResultAndState { result, state } = self.transact(transaction, *sender)?; - trace!( - target: "evm", - ?transaction, ?result, ?state, - "Executed transaction" - ); - self.stats.execution_duration += time.elapsed(); - let time = Instant::now(); - - self.db_mut().commit(state); - - self.stats.apply_state_duration += time.elapsed(); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. 
- receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs().into_iter().map(Into::into).collect(), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - self.stats.log_debug(); - BundleStateWithReceipts::new( - self.evm.context.evm.db.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - ) - } - - fn size_hint(&self) -> Option { - Some(self.evm.context.evm.db.bundle_size_hint()) - } -} - -impl<'a, EvmConfig> PrunableBlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } - - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); - } -} - -/// Calculate the receipts root, and copmare it against against the expected receipts root and logs -/// bloom. -pub fn verify_receipt<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -/// Compare the calculated receipts root with the expected receipts root, also copmare -/// the calculated logs bloom with the expected logs bloom. 
-pub fn compare_receipts_root_and_logs_bloom( - calculated_receipts_root: B256, - calculated_logs_bloom: Bloom, - expected_receipts_root: B256, - expected_logs_bloom: Bloom, -) -> Result<(), BlockExecutionError> { - if calculated_receipts_root != expected_receipts_root { - return Err(BlockValidationError::ReceiptRootDiff( - GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), - ) - .into()) - } - - if calculated_logs_bloom != expected_logs_bloom { - return Err(BlockValidationError::BloomLogDiff( - GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), - ) - .into()) - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::{StateProviderTest, TestEvmConfig}; - use reth_primitives::{ - bytes, - constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, - keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, - TxEip1559, MAINNET, - }; - use revm::{Database, TransitionState}; - use std::collections::HashMap; - - static BEACON_ROOT_CONTRACT_CODE: Bytes = bytes!("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"); - - fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let beacon_root_contract_account = Account { - balance: U256::ZERO, - bytecode_hash: Some(keccak256(BEACON_ROOT_CONTRACT_CODE.clone())), - nonce: 1, - }; - - db.insert_account( - BEACON_ROOTS_ADDRESS, - beacon_root_contract_account, - Some(BEACON_ROOT_CONTRACT_CODE.clone()), - HashMap::new(), - ); - - db - } - - #[test] - fn eip_4788_non_genesis_call() { - let mut header = - Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - 
ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - // execute invalid header (no parent beacon block root) - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - - // attempt to execute a block without parent beacon block root, expect err - let err = executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect_err( - "Executing cancun block without parent beacon block root field should fail", - ); - assert_eq!( - err, - BlockExecutionError::Validation(BlockValidationError::MissingParentBeaconBlockRoot) - ); - - // fix header, set a gas limit - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = - executor.db_mut().storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // 
get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .db_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist"); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn eip_4788_no_code_cancun() { - // This test ensures that we "silently fail" when cancun is active and there is no code at - // BEACON_ROOTS_ADDRESS - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = StateProviderTest::default(); - - // DON'T deploy the contract at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - executor.init_env(&header, U256::ZERO); - - // get the env - let previous_env = executor.evm.context.evm.env.clone(); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the env has not changed - assert_eq!(executor.evm.context.evm.env, previous_env); - } - - #[test] - fn eip_4788_empty_account_call() { - // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account - // during the pre-block call - - let mut db = create_state_provider_with_beacon_root_contract(); - - // insert an empty SYSTEM_ADDRESS - db.insert_account(SYSTEM_ADDRESS, 
Account::default(), None, HashMap::new()); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - - // construct the header for block one - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - executor.init_env(&header, U256::ZERO); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the nonce of the system address account has not changed - let nonce = executor.db_mut().basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce; - assert_eq!(nonce, 0); - } - - #[test] - fn eip_4788_genesis_call() { - let db = create_state_provider_with_beacon_root_contract(); - - // activate cancun at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut header = chain_spec.genesis_header(); - - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - executor.init_env(&header, U256::ZERO); - - // attempt to execute the genesis block with non-zero parent beacon block root, expect err - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let _err = executor - .execute_and_verify_receipt( - &BlockWithSenders { - 
block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect_err( - "Executing genesis cancun block with non-zero parent beacon block root field should fail", - ); - - // fix header - header.parent_beacon_block_root = Some(B256::ZERO); - - // now try to process the genesis block again, this time ensuring that a system contract - // call does not occur - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // there is no system contract call so there should be NO STORAGE CHANGES - // this means we'll check the transition state - let state = executor.evm.context.evm.inner.db; - let transition_state = - state.transition_state.expect("the evm should be initialized with bundle updates"); - - // assert that it is the default (empty) transition state - assert_eq!(transition_state, TransitionState::default()); - } - - #[test] - fn eip_4788_high_base_fee() { - // This test ensures that if we have a base fee, then we don't return an error when the - // system contract is called, due to the gas price being less than the base fee. 
- let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - base_fee_per_gas: Some(u64::MAX), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - // execute header - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - executor.init_env(&header, U256::ZERO); - - // ensure that the env is configured with a base fee - assert_eq!(executor.evm.block().basefee, U256::from(u64::MAX)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = - executor.db_mut().storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .db_mut() - .storage(BEACON_ROOTS_ADDRESS, 
U256::from(parent_beacon_block_root_index)) - .unwrap(); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn test_transact_error_includes_correct_hash() { - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let db = StateProviderTest::default(); - let chain_id = chain_spec.chain.id(); - - // execute header - let evm_config = TestEvmConfig::default(); - let mut executor = - EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db), &evm_config); - - // Create a test transaction that gonna fail - let transaction = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce: 1, - gas_limit: 21_000, - to: Address::ZERO.into(), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, - ..Default::default() - }), - Signature::default(), - ); - - let result = executor.transact(&transaction, Address::random()); - - let expected_hash = transaction.recalculate_hash(); - - // Check the error - match result { - Err(BlockExecutionError::Validation(BlockValidationError::EVM { hash, error: _ })) => { - assert_eq!(hash, expected_hash, "The EVM error does not include the correct transaction hash."); - }, - _ => panic!("Expected a BlockExecutionError::Validation error, but transaction did not fail as expected."), - } - } -} diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index ef79a7ed3..7e198c998 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -44,7 +44,8 @@ tracing.workspace = true reth-beacon-consensus.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true +reth-ethereum-engine-primitives.workspace = true reth-payload-builder = { workspace = true, features = 
["test-utils"] } reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 51ba2f145..4b95d11ed 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -2,7 +2,7 @@ use crate::utils::launch_auth; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; -use reth_node_ethereum::EthEngineTypes; +use reth_ethereum_engine_primitives::EthEngineTypes; use reth_primitives::{Block, U64}; use reth_rpc::JwtSecret; use reth_rpc_api::clients::EngineApiClient; diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index c11801442..403e12a1b 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,6 +1,7 @@ use reth_beacon_consensus::BeaconConsensusEngineHandle; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::MAINNET; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 81788f0a3..513c7da13 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -29,6 +29,8 @@ revm-inspectors.workspace = true reth-evm.workspace = true reth-network-types.workspace = true +reth-evm-optimism = { workspace = true, optional = true } + # eth alloy-rlp.workspace = true alloy-dyn-abi = { workspace = true, features = ["eip712"] } @@ -89,4 +91,6 @@ optimism = [ "reth-primitives/optimism", "reth-rpc-types-compat/optimism", "reth-provider/optimism", + "dep:reth-evm-optimism", + "reth-evm-optimism/optimism", ] diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs 
index 95b6b6bc7..cfc3fe058 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -84,7 +84,7 @@ where #[cfg(feature = "optimism")] let (block_timestamp, l1_block_info) = { - let body = reth_revm::optimism::extract_l1_info(&block); + let body = reth_evm_optimism::extract_l1_info(&block); (block.timestamp, body.ok()) }; diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 51bde5bfa..75470e1fe 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -49,17 +49,9 @@ use revm::{ }; use std::future::Future; -#[cfg(feature = "optimism")] -use crate::eth::api::optimism::OptimismTxMeta; -#[cfg(feature = "optimism")] -use crate::eth::optimism::OptimismEthApiError; use crate::eth::revm_utils::FillableTransaction; #[cfg(feature = "optimism")] -use reth_revm::optimism::RethL1BlockInfo; -#[cfg(feature = "optimism")] use reth_rpc_types::OptimismTransactionReceiptFields; -#[cfg(feature = "optimism")] -use revm::L1BlockInfo; use revm_primitives::db::{Database, DatabaseRef}; /// Helper alias type for the state's [CacheDB] @@ -1498,7 +1490,7 @@ where .ok_or(EthApiError::UnknownBlockNumber)?; let block = block.unseal(); - let l1_block_info = reth_revm::optimism::extract_l1_info(&block).ok(); + let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; build_transaction_receipt_with_block_receipts( @@ -1510,17 +1502,19 @@ where ) } - /// Builds [OptimismTxMeta] object using the provided [TransactionSigned], - /// [L1BlockInfo] and `block_timestamp`. The [L1BlockInfo] is used to calculate - /// the l1 fee and l1 data gas for the transaction. - /// If the [L1BlockInfo] is not provided, the [OptimismTxMeta] will be empty. + /// Builds op metadata object using the provided [TransactionSigned], L1 block info and + /// `block_timestamp`. 
The L1BlockInfo is used to calculate the l1 fee and l1 data gas for the + /// transaction. If the L1BlockInfo is not provided, the meta info will be empty. #[cfg(feature = "optimism")] pub(crate) fn build_op_tx_meta( &self, tx: &TransactionSigned, - l1_block_info: Option, + l1_block_info: Option, block_timestamp: u64, - ) -> EthResult { + ) -> EthResult { + use crate::eth::{api::optimism::OptimismTxMeta, optimism::OptimismEthApiError}; + use reth_evm_optimism::RethL1BlockInfo; + let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; let (l1_fee, l1_data_gas) = if !tx.is_deposit() { @@ -1711,7 +1705,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( meta: TransactionMeta, receipt: Receipt, all_receipts: &[Receipt], - #[cfg(feature = "optimism")] optimism_tx_meta: OptimismTxMeta, + #[cfg(feature = "optimism")] optimism_tx_meta: crate::eth::api::optimism::OptimismTxMeta, ) -> EthResult { // Note: we assume this transaction is valid, because it's mined (or part of pending block) and // we don't need to check for pre EIP-2 diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index f3bd16a5e..ef91b2be2 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -24,6 +24,8 @@ reth-etl.workspace = true reth-config.workspace = true reth-stages-api = { workspace = true, features = ["test-utils"] } reth-consensus.workspace = true +reth-evm.workspace = true +reth-revm.workspace = true # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 92c2b3a09..2c6aaff25 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -16,7 +16,7 @@ //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! # use reth_interfaces::test_utils::{TestBodiesClient, TestHeadersClient}; -//! # use reth_revm::EvmProcessorFactory; +//! 
# use reth_evm_ethereum::execute::EthExecutorProvider; //! # use reth_primitives::{MAINNET, B256, PruneModes}; //! # use reth_network_types::PeerId; //! # use reth_stages::Pipeline; @@ -45,7 +45,7 @@ //! # provider_factory.clone() //! # ); //! # let (tip_tx, tip_rx) = watch::channel(B256::default()); -//! # let executor_factory = EvmProcessorFactory::new(chain_spec.clone(), EthEvmConfig::default()); +//! # let executor_provider = EthExecutorProvider::mainnet(); //! # let static_file_producer = StaticFileProducer::new( //! # provider_factory.clone(), //! # provider_factory.static_file_provider(), @@ -55,17 +55,15 @@ //! # let pipeline = //! Pipeline::builder() //! .with_tip_sender(tip_tx) -//! .add_stages( -//! DefaultStages::new( -//! provider_factory.clone(), -//! HeaderSyncMode::Tip(tip_rx), -//! consensus, -//! headers_downloader, -//! bodies_downloader, -//! executor_factory, -//! EtlConfig::default(), -//! ) -//! ) +//! .add_stages(DefaultStages::new( +//! provider_factory.clone(), +//! HeaderSyncMode::Tip(tip_rx), +//! consensus, +//! headers_downloader, +//! bodies_downloader, +//! executor_provider, +//! EtlConfig::default(), +//! )) //! .build(provider_factory, static_file_producer); //! ``` //! diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 99edf05b7..7ec85170f 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -12,44 +12,29 @@ //! ```no_run //! # use reth_stages::Pipeline; //! # use reth_stages::sets::{OfflineStages}; -//! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PruneModes, MAINNET}; //! # use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; +//! # use reth_evm::execute::BlockExecutorProvider; //! -//! 
# let executor_factory = EvmProcessorFactory::new(MAINNET.clone(), EthEvmConfig::default()); -//! # let provider_factory = create_test_provider_factory(); -//! # let static_file_producer = StaticFileProducer::new( +//! # fn create(exec: impl BlockExecutorProvider) { +//! +//! let provider_factory = create_test_provider_factory(); +//! let static_file_producer = StaticFileProducer::new( //! provider_factory.clone(), //! provider_factory.static_file_provider(), //! PruneModes::default(), //! ); //! // Build a pipeline with all offline stages. -//! # let pipeline = Pipeline::builder() -//! .add_stages(OfflineStages::new(executor_factory, EtlConfig::default())) +//! let pipeline = Pipeline::builder() +//! .add_stages(OfflineStages::new(exec, EtlConfig::default())) //! .build(provider_factory, static_file_producer); -//! ``` //! -//! ```ignore -//! # use reth_stages::Pipeline; -//! # use reth_stages::{StageSet, sets::OfflineStages}; -//! # use reth_revm::EvmProcessorFactory; -//! # use reth_node_ethereum::EthEvmConfig; -//! # use reth_primitives::MAINNET; -//! # use reth_config::config::EtlConfig; -//! -//! // Build a pipeline with all offline stages and a custom stage at the end. -//! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone(), EthEvmConfig::default()); -//! Pipeline::builder() -//! .add_stages( -//! OfflineStages::new(executor_factory, EtlConfig::default()).builder().add_stage(MyCustomStage) -//! ) -//! .build(); +//! # } //! 
``` - use crate::{ stages::{ AccountHashingStage, BodyStage, ExecutionStage, FinishStage, HeaderStage, @@ -61,10 +46,11 @@ use crate::{ use reth_config::config::EtlConfig; use reth_consensus::Consensus; use reth_db::database::Database; +use reth_evm::execute::BlockExecutorProvider; use reth_interfaces::p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, }; -use reth_provider::{ExecutorFactory, HeaderSyncGapProvider, HeaderSyncMode}; +use reth_provider::{HeaderSyncGapProvider, HeaderSyncMode}; use std::sync::Arc; /// A set containing all stages to run a fully syncing instance of reth. @@ -98,7 +84,7 @@ pub struct DefaultStages { etl_config: EtlConfig, } -impl DefaultStages { +impl DefaultStages { /// Create a new set of default stages with default values. pub fn new( provider: Provider, @@ -106,11 +92,11 @@ impl DefaultStages { consensus: Arc, header_downloader: H, body_downloader: B, - executor_factory: EF, + executor_factory: E, etl_config: EtlConfig, ) -> Self where - EF: ExecutorFactory, + E: BlockExecutorProvider, { Self { online: OnlineStages::new( @@ -127,14 +113,14 @@ impl DefaultStages { } } -impl DefaultStages +impl DefaultStages where - EF: ExecutorFactory, + E: BlockExecutorProvider, { /// Appends the default offline stages and default finish stage to the given builder. 
pub fn add_offline_stages( default_offline: StageSetBuilder, - executor_factory: EF, + executor_factory: E, etl_config: EtlConfig, ) -> StageSetBuilder { StageSetBuilder::default() @@ -144,12 +130,12 @@ where } } -impl StageSet for DefaultStages +impl StageSet for DefaultStages where Provider: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, - EF: ExecutorFactory, + E: BlockExecutorProvider, DB: Database + 'static, { fn builder(self) -> StageSetBuilder { @@ -269,7 +255,11 @@ impl OfflineStages { } } -impl StageSet for OfflineStages { +impl StageSet for OfflineStages +where + E: BlockExecutorProvider, + DB: Database, +{ fn builder(self) -> StageSetBuilder { ExecutionStages::new(self.executor_factory) .builder() @@ -281,23 +271,27 @@ impl StageSet for OfflineStages { /// A set containing all stages that are required to execute pre-existing block data. #[derive(Debug)] #[non_exhaustive] -pub struct ExecutionStages { +pub struct ExecutionStages { /// Executor factory that will create executors. - executor_factory: EF, + executor_factory: E, } -impl ExecutionStages { +impl ExecutionStages { /// Create a new set of execution stages with default values. 
- pub fn new(executor_factory: EF) -> Self { + pub fn new(executor_factory: E) -> Self { Self { executor_factory } } } -impl StageSet for ExecutionStages { +impl StageSet for ExecutionStages +where + DB: Database, + E: BlockExecutorProvider, +{ fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(SenderRecoveryStage::default()) - .add_stage(ExecutionStage::new_with_factory(self.executor_factory)) + .add_stage(ExecutionStage::new_with_executor(self.executor_factory)) } } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 1771e2570..0db907211 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -3,6 +3,7 @@ use num_traits::Zero; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; +use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, BlockExecutorProvider}; use reth_exex::{ExExManagerHandle, ExExNotification}; use reth_primitives::{ stage::{ @@ -12,9 +13,10 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - BlockReader, Chain, DatabaseProviderRW, ExecutorFactory, HeaderProvider, + BlockReader, BundleStateWithReceipts, Chain, DatabaseProviderRW, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, TransactionVariant, }; +use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError, UnwindInput, UnwindOutput, @@ -59,10 +61,10 @@ use tracing::*; /// to [tables::PlainStorageState] // false positive, we cannot derive it if !DB: Debug. 
#[allow(missing_debug_implementations)] -pub struct ExecutionStage { +pub struct ExecutionStage { metrics_tx: Option, - /// The stage's internal executor - executor_factory: EF, + /// The stage's internal block executor + executor_provider: E, /// The commit thresholds of the execution stage. thresholds: ExecutionStageThresholds, /// The highest threshold (in number of blocks) for switching between incremental @@ -76,10 +78,10 @@ pub struct ExecutionStage { exex_manager_handle: ExExManagerHandle, } -impl ExecutionStage { +impl ExecutionStage { /// Create new execution stage with specified config. pub fn new( - executor_factory: EF, + executor_provider: E, thresholds: ExecutionStageThresholds, external_clean_threshold: u64, prune_modes: PruneModes, @@ -88,19 +90,19 @@ impl ExecutionStage { Self { metrics_tx: None, external_clean_threshold, - executor_factory, + executor_provider, thresholds, prune_modes, exex_manager_handle, } } - /// Create an execution stage with the provided executor factory. + /// Create an execution stage with the provided executor. /// /// The commit threshold will be set to 10_000. - pub fn new_with_factory(executor_factory: EF) -> Self { + pub fn new_with_executor(executor_provider: E) -> Self { Self::new( - executor_factory, + executor_provider, ExecutionStageThresholds::default(), MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::none(), @@ -144,7 +146,10 @@ impl ExecutionStage { } } -impl ExecutionStage { +impl ExecutionStage +where + E: BlockExecutorProvider, +{ /// Execute the stage. 
pub fn execute_inner( &mut self, @@ -169,12 +174,11 @@ impl ExecutionStage { None }; - // Build executor - let mut executor = self.executor_factory.with_state(LatestStateProviderRef::new( + let db = StateProviderDatabase(LatestStateProviderRef::new( provider.tx_ref(), provider.static_file_provider().clone(), )); - executor.set_prune_modes(prune_modes); + let mut executor = self.executor_provider.batch_executor(db, prune_modes); executor.set_tip(max_block); // Progress tracking @@ -213,7 +217,8 @@ impl ExecutionStage { // Execute the block let execute_start = Instant::now(); - executor.execute_and_verify_receipt(&block, td).map_err(|error| StageError::Block { + + executor.execute_one((&block, td).into()).map_err(|error| StageError::Block { block: Box::new(block.header.clone().seal_slow()), error: BlockErrorKind::Execution(error), })?; @@ -245,7 +250,8 @@ impl ExecutionStage { } } let time = Instant::now(); - let state = executor.take_output_state(); + let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); + let state = BundleStateWithReceipts::new(bundle, receipts, first_block); let write_preparation_duration = time.elapsed(); // Check if we should send a [`ExExNotification`] to execution extensions. 
@@ -383,7 +389,11 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -impl Stage for ExecutionStage { +impl Stage for ExecutionStage +where + DB: Database, + E: BlockExecutorProvider, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::Execution @@ -609,7 +619,7 @@ mod tests { use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_db::{models::AccountBeforeTx, transaction::DbTxMut}; - use reth_evm_ethereum::EthEvmConfig; + use reth_evm_ethereum::execute::EthExecutorProvider; use reth_interfaces::executor::BlockValidationError; use reth_primitives::{ address, hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Address, @@ -620,16 +630,14 @@ mod tests { test_utils::create_test_provider_factory, AccountReader, ReceiptProvider, StaticFileProviderFactory, }; - use reth_revm::EvmProcessorFactory; use std::collections::BTreeMap; - fn stage() -> ExecutionStage> { - let executor_factory = EvmProcessorFactory::new( - Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()), - EthEvmConfig::default(), - ); + fn stage() -> ExecutionStage { + let executor_provider = EthExecutorProvider::ethereum(Arc::new( + ChainSpecBuilder::mainnet().berlin_activated().build(), + )); ExecutionStage::new( - executor_factory, + executor_provider, ExecutionStageThresholds { max_blocks: Some(100), max_changes: None, @@ -864,7 +872,7 @@ mod tests { mode.receipts_log_filter = random_filter.clone(); } - let mut execution_stage: ExecutionStage> = stage(); + let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); let output = execution_stage.execute(&provider, input).unwrap(); diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index a40da1c49..7bb88ff96 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -50,7 +50,7 @@ mod tests { transaction::{DbTx, DbTxMut}, AccountsHistory, DatabaseEnv, }; - use reth_evm_ethereum::EthEvmConfig; + use 
reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex::ExExManagerHandle; use reth_interfaces::test_utils::generators::{self, random_block}; use reth_primitives::{ @@ -61,7 +61,6 @@ mod tests { providers::StaticFileWriter, AccountExtReader, ProviderFactory, ReceiptProvider, StorageReader, }; - use reth_revm::EvmProcessorFactory; use reth_stages_api::{ExecInput, Stage}; use std::sync::Arc; @@ -140,10 +139,9 @@ mod tests { // Check execution and create receipts and changesets according to the pruning // configuration let mut execution_stage = ExecutionStage::new( - EvmProcessorFactory::new( - Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()), - EthEvmConfig::default(), - ), + EthExecutorProvider::ethereum(Arc::new( + ChainSpecBuilder::mainnet().berlin_activated().build(), + )), ExecutionStageThresholds { max_blocks: Some(100), max_changes: None, diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index baf5fa597..a57f18f11 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -4,6 +4,7 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; +use reth_evm::execute::BatchBlockExecutionOutput; use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ logs_bloom, @@ -34,6 +35,22 @@ pub struct BundleStateWithReceipts { first_block: BlockNumber, } +// TODO(mattsse): unify the types, currently there's a cyclic dependency between +impl From for BundleStateWithReceipts { + fn from(value: BatchBlockExecutionOutput) -> Self { + let BatchBlockExecutionOutput { bundle, receipts, first_block } = value; + Self { bundle, receipts, first_block } + } +} + +// TODO(mattsse): unify the types, currently there's a cyclic dependency between +impl From for BatchBlockExecutionOutput { + fn from(value: BundleStateWithReceipts) 
-> Self { + let BundleStateWithReceipts { bundle, receipts, first_block } = value; + Self { bundle, receipts, first_block } + } +} + /// Type used to initialize revms bundle state. pub type BundleStateInit = HashMap, Option, HashMap)>; diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 31edf4f03..d2c016add 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -19,7 +19,7 @@ use reth::{ }; use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{EthEvmConfig, EthereumNode}; +use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; use reth_primitives::{Chain, ChainSpec, Genesis, Header, Transaction}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -106,7 +106,7 @@ impl ConfigureEvm for MyEvmConfig { } } -/// A regular ethereum evm and executor builder. +/// Builds a regular ethereum block executor that uses the custom EVM. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] pub struct MyExecutorBuilder; @@ -116,9 +116,16 @@ where Node: FullNodeTypes, { type EVM = MyEvmConfig; - - async fn build_evm(self, _ctx: &BuilderContext) -> eyre::Result { - Ok(MyEvmConfig::default()) + type Executor = EthExecutorProvider; + + async fn build_evm( + self, + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + Ok(( + MyEvmConfig::default(), + EthExecutorProvider::new(ctx.chain_spec(), MyEvmConfig::default()), + )) } } diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index 3f2193227..2584c42d6 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -22,12 +22,12 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-stages.workspace = true reth-interfaces.workspace = true reth-revm.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true alloy-rlp.workspace = true -tokio = "1.28.1" +tokio.workspace = true walkdir = "2.3.3" -serde = "1.0.163" +serde.workspace = true serde_json.workspace = true thiserror.workspace = true rayon.workspace = true diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 424603cb4..27f62f886 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -7,7 +7,6 @@ use crate::{ use alloy_rlp::Decodable; use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; use reth_provider::{providers::StaticFileWriter, HashingWriter, ProviderFactory}; use reth_stages::{stages::ExecutionStage, ExecInput, Stage}; @@ -136,10 +135,11 @@ impl Case for BlockchainTestCase { // Execute the execution stage using the EVM processor factory for the test case // network. 
- let _ = ExecutionStage::new_with_factory(reth_revm::EvmProcessorFactory::new( - Arc::new(case.network.clone().into()), - EthEvmConfig::default(), - )) + let _ = ExecutionStage::new_with_executor( + reth_evm_ethereum::execute::EthExecutorProvider::ethereum(Arc::new( + case.network.clone().into(), + )), + ) .execute( &provider, ExecInput { target: last_block.as_ref().map(|b| b.number), checkpoint: None }, From 90f3161256f2dbfafedbc4f71266887ecfd41116 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 May 2024 14:15:04 +0200 Subject: [PATCH 184/250] chore: remove InspectorStack (#8073) --- Cargo.lock | 1 - crates/ethereum/evm/src/execute.rs | 39 +---- crates/node-core/src/args/debug.rs | 33 +--- crates/node-ethereum/src/node.rs | 3 +- crates/node/builder/Cargo.toml | 1 - crates/node/builder/src/builder/mod.rs | 23 --- crates/optimism/evm/src/execute.rs | 39 +---- crates/optimism/node/src/node.rs | 3 +- crates/revm/src/lib.rs | 5 - crates/revm/src/stack.rs | 202 ------------------------- 10 files changed, 15 insertions(+), 334 deletions(-) delete mode 100644 crates/revm/src/stack.rs diff --git a/Cargo.lock b/Cargo.lock index b2e179a95..a3f0450ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7278,7 +7278,6 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune", - "reth-revm", "reth-rpc", "reth-rpc-engine-api", "reth-stages", diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index c3dd315f7..b65e7be17 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -20,7 +20,6 @@ use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, }; @@ -36,7 +35,6 @@ use tracing::debug; pub struct EthExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, - 
inspector: Option, } impl EthExecutorProvider { @@ -54,13 +52,7 @@ impl EthExecutorProvider { impl EthExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None } - } - - /// Configures an optional inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { chain_spec, evm_config } } } @@ -78,7 +70,6 @@ where self.evm_config.clone(), State::builder().with_database(db).with_bundle_update().without_state_clear().build(), ) - .with_inspector(self.inspector.clone()) } } @@ -221,20 +212,12 @@ pub struct EthBlockExecutor { executor: EthEvmExecutor, /// The state to use for execution state: State, - /// Optional inspector stack for debugging - inspector: Option, } impl EthBlockExecutor { /// Creates a new Ethereum block executor. pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: EthEvmExecutor { chain_spec, evm_config }, state, inspector: None } - } - - /// Sets the inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { executor: EthEvmExecutor { chain_spec, evm_config }, state } } #[inline] @@ -292,19 +275,9 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let (receipts, gas_used) = { - if let Some(inspector) = self.inspector.as_mut() { - let evm = self.executor.evm_config.evm_with_env_and_inspector( - &mut self.state, - env, - inspector, - ); - self.executor.execute_pre_and_transactions(block, evm)? - } else { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - - self.executor.execute_pre_and_transactions(block, evm)? - } - }; + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + self.executor.execute_pre_and_transactions(block, evm) + }?; // 3. 
apply post execution changes self.post_execution(block, total_difficulty)?; @@ -507,7 +480,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { - EthExecutorProvider { chain_spec, evm_config: Default::default(), inspector: None } + EthExecutorProvider { chain_spec, evm_config: Default::default() } } #[test] diff --git a/crates/node-core/src/args/debug.rs b/crates/node-core/src/args/debug.rs index 3eda71ad0..d1c4e9b73 100644 --- a/crates/node-core/src/args/debug.rs +++ b/crates/node-core/src/args/debug.rs @@ -1,7 +1,7 @@ //! clap [Args](clap::Args) for debugging purposes use clap::Args; -use reth_primitives::{TxHash, B256}; +use reth_primitives::B256; use std::path::PathBuf; /// Parameters for debugging purposes @@ -28,37 +28,6 @@ pub struct DebugArgs { #[arg(long = "debug.max-block", help_heading = "Debug")] pub max_block: Option, - /// Print opcode level traces directly to console during execution. - #[arg(long = "debug.print-inspector", help_heading = "Debug")] - pub print_inspector: bool, - - /// Hook on a specific block during execution. - #[arg( - long = "debug.hook-block", - help_heading = "Debug", - conflicts_with = "hook_transaction", - conflicts_with = "hook_all" - )] - pub hook_block: Option, - - /// Hook on a specific transaction during execution. - #[arg( - long = "debug.hook-transaction", - help_heading = "Debug", - conflicts_with = "hook_block", - conflicts_with = "hook_all" - )] - pub hook_transaction: Option, - - /// Hook on every transaction in a block. - #[arg( - long = "debug.hook-all", - help_heading = "Debug", - conflicts_with = "hook_block", - conflicts_with = "hook_transaction" - )] - pub hook_all: bool, - /// If provided, the engine will skip `n` consecutive FCUs. 
#[arg(long = "debug.skip-fcu", help_heading = "Debug")] pub skip_fcu: Option, diff --git a/crates/node-ethereum/src/node.rs b/crates/node-ethereum/src/node.rs index 235130b42..87bc54d15 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/node-ethereum/src/node.rs @@ -85,8 +85,7 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = ctx.chain_spec(); let evm_config = EthEvmConfig::default(); - let executor = - EthExecutorProvider::new(chain_spec, evm_config).with_inspector(ctx.inspector_stack()); + let executor = EthExecutorProvider::new(chain_spec, evm_config); Ok((evm_config, executor)) } diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 68c1d5f0c..26635e536 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -19,7 +19,6 @@ reth-blockchain-tree.workspace = true reth-exex.workspace = true reth-evm.workspace = true reth-provider.workspace = true -reth-revm.workspace = true reth-db.workspace = true reth-rpc-engine-api.workspace = true reth-rpc.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 0457bbe3e..b6f0a191e 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -27,7 +27,6 @@ use reth_node_core::{ }; use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, ChainSpec}; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; -use reth_revm::stack::{InspectorStack, InspectorStackConfig}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; pub use states::*; @@ -461,28 +460,6 @@ impl BuilderContext { &self.config } - /// Returns an inspector stack if configured. - /// - /// This can be used to debug block execution. 
- pub fn inspector_stack(&self) -> Option { - use reth_revm::stack::Hook; - let stack_config = InspectorStackConfig { - use_printer_tracer: self.config.debug.print_inspector, - hook: if let Some(hook_block) = self.config.debug.hook_block { - Hook::Block(hook_block) - } else if let Some(tx) = self.config.debug.hook_transaction { - Hook::Transaction(tx) - } else if self.config.debug.hook_all { - Hook::All - } else { - // no inspector - return None - }, - }; - - Some(InspectorStack::new(stack_config)) - } - /// Returns the data dir of the node. /// /// This gives access to all relevant files and directories of the node's datadir. diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 2ea32782c..d19d441a8 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -19,7 +19,6 @@ use reth_primitives::{ use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, }; @@ -35,7 +34,6 @@ use tracing::{debug, trace}; pub struct OpExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, - inspector: Option, } impl OpExecutorProvider { @@ -48,13 +46,7 @@ impl OpExecutorProvider { impl OpExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None } - } - - /// Configures an optional inspector stack for debugging. 
- pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { chain_spec, evm_config } } } @@ -72,7 +64,6 @@ where self.evm_config.clone(), State::builder().with_database(db).with_bundle_update().without_state_clear().build(), ) - .with_inspector(self.inspector.clone()) } } @@ -268,20 +259,12 @@ pub struct OpBlockExecutor { executor: OpEvmExecutor, /// The state to use for execution state: State, - /// Optional inspector stack for debugging - inspector: Option, } impl OpBlockExecutor { /// Creates a new Ethereum block executor. pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, inspector: None } - } - - /// Sets the inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { executor: OpEvmExecutor { chain_spec, evm_config }, state } } #[inline] @@ -337,19 +320,9 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let (receipts, gas_used) = { - if let Some(inspector) = self.inspector.as_mut() { - let evm = self.executor.evm_config.evm_with_env_and_inspector( - &mut self.state, - env, - inspector, - ); - self.executor.execute_pre_and_transactions(block, evm)? - } else { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - - self.executor.execute_pre_and_transactions(block, evm)? - } - }; + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + self.executor.execute_pre_and_transactions(block, evm) + }?; // 3. 
apply post execution changes self.post_execution(block, total_difficulty)?; @@ -548,7 +521,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { chain_spec, evm_config: Default::default(), inspector: None } + OpExecutorProvider { chain_spec, evm_config: Default::default() } } #[test] diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index a2cbc287c..7d715fece 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -105,8 +105,7 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = ctx.chain_spec(); let evm_config = OptimismEvmConfig::default(); - let executor = - OpExecutorProvider::new(chain_spec, evm_config).with_inspector(ctx.inspector_stack()); + let executor = OpExecutorProvider::new(chain_spec, evm_config); Ok((evm_config, executor)) } diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index d8c5761d0..375b230ab 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -19,11 +19,6 @@ pub mod state_change; /// Ethereum DAO hardfork state change data. pub mod eth_dao_fork; -/// An inspector stack abstracting the implementation details of -/// each inspector and allowing to hook on block/transaction execution, -/// used in the main Reth executor. -pub mod stack; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/revm/src/stack.rs b/crates/revm/src/stack.rs deleted file mode 100644 index 8f8bfa5ce..000000000 --- a/crates/revm/src/stack.rs +++ /dev/null @@ -1,202 +0,0 @@ -use revm::{ - inspectors::CustomPrintTracer, - interpreter::{CallInputs, CallOutcome, CreateInputs, CreateOutcome, Interpreter}, - primitives::{Address, Env, Log, B256, U256}, - Database, EvmContext, Inspector, -}; -use std::fmt::Debug; - -/// A hook to inspect the execution of the EVM. -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] -pub enum Hook { - /// No hook. 
- #[default] - None, - /// Hook on a specific block. - Block(u64), - /// Hook on a specific transaction hash. - Transaction(B256), - /// Hooks on every transaction in a block. - All, -} - -impl Hook { - /// Returns `true` if this hook should be used. - #[inline] - pub fn is_enabled(&self, block_number: u64, tx_hash: &B256) -> bool { - match self { - Hook::None => false, - Hook::Block(block) => block_number == *block, - Hook::Transaction(hash) => hash == tx_hash, - Hook::All => true, - } - } -} - -/// An inspector that calls multiple inspectors in sequence. -#[derive(Clone, Default)] -pub struct InspectorStack { - /// An inspector that prints the opcode traces to the console. - pub custom_print_tracer: Option, - /// The provided hook - pub hook: Hook, -} - -impl Debug for InspectorStack { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InspectorStack") - .field("custom_print_tracer", &self.custom_print_tracer.is_some()) - .field("hook", &self.hook) - .finish() - } -} - -impl InspectorStack { - /// Creates a new inspector stack with the given configuration. - #[inline] - pub fn new(config: InspectorStackConfig) -> Self { - Self { - hook: config.hook, - custom_print_tracer: config.use_printer_tracer.then(Default::default), - } - } - - /// Returns `true` if this inspector should be used. - #[inline] - pub fn should_inspect(&self, env: &Env, tx_hash: &B256) -> bool { - self.custom_print_tracer.is_some() && - self.hook.is_enabled(env.block.number.saturating_to(), tx_hash) - } -} - -/// Configuration for the inspectors. -#[derive(Clone, Copy, Debug, Default)] -pub struct InspectorStackConfig { - /// Enable revm inspector printer. - /// In execution this will print opcode level traces directly to console. - pub use_printer_tracer: bool, - - /// Hook on a specific block or transaction. - pub hook: Hook, -} - -/// Helper macro to call the same method on multiple inspectors without resorting to dynamic -/// dispatch. 
-#[macro_export] -macro_rules! call_inspectors { - ([$($inspector:expr),+ $(,)?], |$id:ident $(,)?| $call:expr $(,)?) => {{$( - if let Some($id) = $inspector { - $call - } - )+}} -} - -impl Inspector for InspectorStack -where - DB: Database, -{ - #[inline] - fn initialize_interp(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.initialize_interp(interp, context); - }); - } - - #[inline] - fn step(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.step(interp, context); - }); - } - - #[inline] - fn step_end(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.step_end(interp, context); - }); - } - - #[inline] - fn log(&mut self, context: &mut EvmContext, log: &Log) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.log(context, log); - }); - } - - #[inline] - fn call( - &mut self, - context: &mut EvmContext, - inputs: &mut CallInputs, - ) -> Option { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - if let Some(outcome) = inspector.call(context, inputs) { - return Some(outcome) - } - }); - - None - } - - #[inline] - fn call_end( - &mut self, - context: &mut EvmContext, - inputs: &CallInputs, - outcome: CallOutcome, - ) -> CallOutcome { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - let new_ret = inspector.call_end(context, inputs, outcome.clone()); - - // If the inspector returns a different ret or a revert with a non-empty message, - // we assume it wants to tell us something - if new_ret != outcome { - return new_ret - } - }); - - outcome - } - - #[inline] - fn create( - &mut self, - context: &mut EvmContext, - inputs: &mut CreateInputs, - ) -> Option { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - if let 
Some(out) = inspector.create(context, inputs) { - return Some(out) - } - }); - - None - } - - #[inline] - fn create_end( - &mut self, - context: &mut EvmContext, - inputs: &CreateInputs, - outcome: CreateOutcome, - ) -> CreateOutcome { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - let new_ret = inspector.create_end(context, inputs, outcome.clone()); - - // If the inspector returns a different ret or a revert with a non-empty message, - // we assume it wants to tell us something - if new_ret != outcome { - return new_ret - } - }); - - outcome - } - - #[inline] - fn selfdestruct(&mut self, contract: Address, target: Address, value: U256) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - Inspector::::selfdestruct(inspector, contract, target, value); - }); - } -} From f20e4cbad8ae82aed527ecd809b1e9a553f46e6a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 May 2024 14:38:26 +0200 Subject: [PATCH 185/250] debt(discv5): discv5 integration into network (#8065) --- bin/reth/src/commands/p2p/mod.rs | 6 +++++- crates/net/discv4/src/lib.rs | 10 ---------- crates/net/discv5/src/config.rs | 12 +++++++++++- crates/net/discv5/src/lib.rs | 5 +++-- crates/net/network/src/config.rs | 16 ---------------- crates/node-core/src/args/network.rs | 14 +++----------- crates/node-core/src/node_config.rs | 7 ++++++- 7 files changed, 28 insertions(+), 42 deletions(-) diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 1cc5d4f88..18cc6aba8 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -126,6 +126,7 @@ impl Command { let mut network_config_builder = config .network_config(self.nat, None, p2p_secret_key) .chain_spec(self.chain.clone()) + .disable_discv4_discovery_if(self.chain.chain.is_optimism()) .boot_nodes(self.chain.bootnodes().unwrap_or_default()); network_config_builder = self.discovery.apply_to_builder(network_config_builder); @@ -136,7 +137,10 @@ impl Command { 
data_dir.static_files(), )?)); - if self.discovery.enable_discv5_discovery { + if !self.discovery.disable_discovery && + (self.discovery.enable_discv5_discovery || + network_config.chain_spec.chain.is_optimism()) + { network_config = network_config.discovery_v5_with_config_builder(|builder| { let DiscoveryArgs { discv5_addr, diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 9a0cb9c11..77cc309eb 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -94,16 +94,6 @@ pub const DEFAULT_DISCOVERY_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); /// Note: the default TCP port is the same. pub const DEFAULT_DISCOVERY_PORT: u16 = 30303; -/// The default address for discv5 via UDP. -/// -/// Note: the default TCP address is the same. -pub const DEFAULT_DISCOVERY_V5_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - -/// The default port for discv5 via UDP. -/// -/// Default is port 9000. -pub const DEFAULT_DISCOVERY_V5_PORT: u16 = 9000; - /// The default address for discv4 via UDP: "0.0.0.0:30303" /// /// Note: The default TCP address is the same. diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 05c2863c8..da7e58cb7 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -3,7 +3,7 @@ use std::{ collections::HashSet, fmt::Debug, - net::{IpAddr, SocketAddr}, + net::{IpAddr, Ipv4Addr, SocketAddr}, }; use derive_more::Display; @@ -13,6 +13,16 @@ use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord}; use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; +/// The default address for discv5 via UDP. +/// +/// Default is 0.0.0.0, all interfaces. See [`discv5::ListenConfig`] default. +pub const DEFAULT_DISCOVERY_V5_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + +/// The default port for discv5 via UDP. +/// +/// Default is port 9000. See [`discv5::ListenConfig`] default. 
+pub const DEFAULT_DISCOVERY_V5_PORT: u16 = 9000; + /// Default interval in seconds at which to run a lookup up query. /// /// Default is 60 seconds. diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index e9bc79dce..a5ac1d808 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -38,8 +38,9 @@ pub mod network_stack_id; pub use discv5::{self, IpMode}; pub use config::{ - BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, - DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, + BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, + DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, + DEFAULT_SECONDS_LOOKUP_INTERVAL, }; pub use enr::enr_to_discv4_id; pub use error::Error; diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 9e898014f..40d88f991 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -176,8 +176,6 @@ pub struct NetworkConfigBuilder { dns_discovery_config: Option, /// How to set up discovery version 4. discovery_v4_builder: Option, - /// Whether to enable discovery version 5. Disabled by default. - enable_discovery_v5: bool, /// All boot nodes to start network discovery with. boot_nodes: HashSet, /// Address to use for discovery @@ -220,7 +218,6 @@ impl NetworkConfigBuilder { secret_key, dns_discovery_config: Some(Default::default()), discovery_v4_builder: Some(Default::default()), - enable_discovery_v5: false, boot_nodes: Default::default(), discovery_addr: None, listener_addr: None, @@ -353,12 +350,6 @@ impl NetworkConfigBuilder { self } - /// Allows discv5 discovery. - pub fn discovery_v5(mut self) -> Self { - self.enable_discovery_v5 = true; - self - } - /// Sets the dns discovery config to use. 
pub fn dns_discovery(mut self, config: DnsDiscoveryConfig) -> Self { self.dns_discovery_config = Some(config); @@ -407,12 +398,6 @@ impl NetworkConfigBuilder { self } - /// Enable the Discv5 discovery. - pub fn enable_discv5_discovery(mut self) -> Self { - self.enable_discovery_v5 = true; - self - } - /// Disable the DNS discovery if the given condition is true. pub fn disable_dns_discovery_if(self, disable: bool) -> Self { if disable { @@ -469,7 +454,6 @@ impl NetworkConfigBuilder { secret_key, mut dns_discovery_config, discovery_v4_builder, - enable_discovery_v5: _, boot_nodes, discovery_addr, listener_addr, diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index df6f8ece8..0d5206e7f 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -3,13 +3,10 @@ use crate::version::P2P_CLIENT_VERSION; use clap::Args; use reth_config::Config; -use reth_discv4::{ - DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT, DEFAULT_DISCOVERY_V5_ADDR, - DEFAULT_DISCOVERY_V5_PORT, -}; +use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; use reth_discv5::{ - DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, - DEFAULT_SECONDS_LOOKUP_INTERVAL, + DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, DEFAULT_DISCOVERY_V5_PORT, + DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; use reth_net_nat::NatResolver; use reth_network::{ @@ -272,11 +269,6 @@ impl DiscoveryArgs { network_config_builder = network_config_builder.disable_discv4_discovery(); } - if !self.disable_discovery && (self.enable_discv5_discovery || cfg!(feature = "optimism")) { - network_config_builder = network_config_builder.disable_discv4_discovery(); - network_config_builder = network_config_builder.enable_discv5_discovery(); - } - network_config_builder } diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 3f149a824..a4301b804 100644 
--- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -462,6 +462,7 @@ impl NodeConfig { // set discovery port based on instance number self.network.port + self.instance - 1, )) + .disable_discv4_discovery_if(self.chain.chain.is_optimism()) .discovery_addr(SocketAddr::new( self.network.discovery.addr, // set discovery port based on instance number @@ -470,9 +471,13 @@ impl NodeConfig { let config = cfg_builder.build(client); - if !self.network.discovery.enable_discv5_discovery { + if self.network.discovery.disable_discovery || + !self.network.discovery.enable_discv5_discovery && + !config.chain_spec.chain.is_optimism() + { return config } + // work around since discv5 config builder can't be integrated into network config builder // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { From 43599f983c1a13c90a3dfc9a8032c7ce7f9e9306 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 May 2024 14:52:00 +0200 Subject: [PATCH 186/250] chore: move node-ethereum to ethereum/node (#8076) --- Cargo.toml | 4 ++-- crates/{node-ethereum => ethereum/node}/Cargo.toml | 0 crates/{node-ethereum => ethereum/node}/src/evm.rs | 0 crates/{node-ethereum => ethereum/node}/src/lib.rs | 0 crates/{node-ethereum => ethereum/node}/src/node.rs | 0 .../node}/tests/assets/genesis.json | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/blobs.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/dev.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/eth.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/main.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/p2p.rs | 0 crates/{node-ethereum => ethereum/node}/tests/e2e/utils.rs | 0 crates/{node-ethereum => ethereum/node}/tests/it/builder.rs | 0 crates/{node-ethereum => ethereum/node}/tests/it/exex.rs | 0 crates/{node-ethereum => ethereum/node}/tests/it/main.rs | 0 15 files changed, 2 insertions(+), 2 deletions(-) rename crates/{node-ethereum => 
ethereum/node}/Cargo.toml (100%) rename crates/{node-ethereum => ethereum/node}/src/evm.rs (100%) rename crates/{node-ethereum => ethereum/node}/src/lib.rs (100%) rename crates/{node-ethereum => ethereum/node}/src/node.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/assets/genesis.json (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/blobs.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/dev.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/eth.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/main.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/p2p.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/e2e/utils.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/it/builder.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/it/exex.rs (100%) rename crates/{node-ethereum => ethereum/node}/tests/it/main.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 28b0692dd..12b31162d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,7 +49,7 @@ members = [ "crates/rpc/rpc-types-compat/", "crates/engine-primitives/", "crates/ethereum/engine-primitives/", - "crates/node-ethereum/", + "crates/ethereum/node", "crates/node/builder/", "crates/optimism/consensus", "crates/optimism/node/", @@ -225,7 +225,7 @@ reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-node-builder = { path = "crates/node/builder" } -reth-node-ethereum = { path = "crates/node-ethereum" } +reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-optimism = { path = "crates/optimism/node" } reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = "crates/node-core" } diff --git a/crates/node-ethereum/Cargo.toml b/crates/ethereum/node/Cargo.toml similarity index 100% rename from 
crates/node-ethereum/Cargo.toml rename to crates/ethereum/node/Cargo.toml diff --git a/crates/node-ethereum/src/evm.rs b/crates/ethereum/node/src/evm.rs similarity index 100% rename from crates/node-ethereum/src/evm.rs rename to crates/ethereum/node/src/evm.rs diff --git a/crates/node-ethereum/src/lib.rs b/crates/ethereum/node/src/lib.rs similarity index 100% rename from crates/node-ethereum/src/lib.rs rename to crates/ethereum/node/src/lib.rs diff --git a/crates/node-ethereum/src/node.rs b/crates/ethereum/node/src/node.rs similarity index 100% rename from crates/node-ethereum/src/node.rs rename to crates/ethereum/node/src/node.rs diff --git a/crates/node-ethereum/tests/assets/genesis.json b/crates/ethereum/node/tests/assets/genesis.json similarity index 100% rename from crates/node-ethereum/tests/assets/genesis.json rename to crates/ethereum/node/tests/assets/genesis.json diff --git a/crates/node-ethereum/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/blobs.rs rename to crates/ethereum/node/tests/e2e/blobs.rs diff --git a/crates/node-ethereum/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/dev.rs rename to crates/ethereum/node/tests/e2e/dev.rs diff --git a/crates/node-ethereum/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/eth.rs rename to crates/ethereum/node/tests/e2e/eth.rs diff --git a/crates/node-ethereum/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/main.rs rename to crates/ethereum/node/tests/e2e/main.rs diff --git a/crates/node-ethereum/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/p2p.rs rename to crates/ethereum/node/tests/e2e/p2p.rs diff --git 
a/crates/node-ethereum/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs similarity index 100% rename from crates/node-ethereum/tests/e2e/utils.rs rename to crates/ethereum/node/tests/e2e/utils.rs diff --git a/crates/node-ethereum/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs similarity index 100% rename from crates/node-ethereum/tests/it/builder.rs rename to crates/ethereum/node/tests/it/builder.rs diff --git a/crates/node-ethereum/tests/it/exex.rs b/crates/ethereum/node/tests/it/exex.rs similarity index 100% rename from crates/node-ethereum/tests/it/exex.rs rename to crates/ethereum/node/tests/it/exex.rs diff --git a/crates/node-ethereum/tests/it/main.rs b/crates/ethereum/node/tests/it/main.rs similarity index 100% rename from crates/node-ethereum/tests/it/main.rs rename to crates/ethereum/node/tests/it/main.rs From 1cf65e339478c497309b43396672748a5df869cf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 May 2024 15:23:16 +0200 Subject: [PATCH 187/250] feat(op): pass unverifiable ENRs to rlpx (#8059) --- crates/net/discv5/src/lib.rs | 58 +++++++++++++++++++++++++----------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index a5ac1d808..8e156dde1 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -237,19 +237,45 @@ impl Discv5 { None } discv5::Event::SessionEstablished(enr, remote_socket) => { - // covers `reth_discv4::DiscoveryUpdate` equivalents `DiscoveryUpdate::Added(_)` - // and `DiscoveryUpdate::DiscoveredAtCapacity(_) + // this branch is semantically similar to branches of + // `reth_discv4::DiscoveryUpdate`: `DiscoveryUpdate::Added(_)` and + // `DiscoveryUpdate::DiscoveredAtCapacity(_) // peer has been discovered as part of query, or, by incoming session (peer has // discovered us) - self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(&enr); - 
self.metrics.discovered_peers.increment_established_sessions_raw(1); self.on_discovered_peer(&enr, remote_socket) } - _ => None, + discv5::Event::UnverifiableEnr { + enr, + socket, + node_id: _, + } => { + // this branch is semantically similar to branches of + // `reth_discv4::DiscoveryUpdate`: `DiscoveryUpdate::Added(_)` and + // `DiscoveryUpdate::DiscoveredAtCapacity(_) + + // peer has been discovered as part of query, or, by an outgoing session (but peer + // is behind NAT and responds from a different socket) + + // NOTE: `discv5::Discv5` won't initiate a session with any peer with an + // unverifiable node record, for example one that advertises a reserved LAN IP + // address on a WAN network. This is in order to prevent DoS attacks, where some + // malicious peers may advertise a victim's socket. We will still try and connect + // to them over RLPx, to be compatible with EL discv5 implementations that don't + // enforce this security measure. + + trace!(target: "net::discv5", + ?enr, + %socket, + "discovered unverifiable enr, source socket doesn't match socket advertised in ENR" + ); + + self.on_discovered_peer(&enr, socket) + } + _ => None } } @@ -259,10 +285,12 @@ impl Discv5 { enr: &discv5::Enr, socket: SocketAddr, ) -> Option { + self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(enr); + let node_record = match self.try_into_reachable(enr, socket) { Ok(enr_bc) => enr_bc, Err(err) => { - trace!(target: "net::discovery::discv5", + trace!(target: "net::discv5", %err, ?enr, "discovered peer is unreachable" @@ -274,7 +302,7 @@ impl Discv5 { } }; if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) { - trace!(target: "net::discovery::discv5", + trace!(target: "net::discv5", ?enr, reason, "filtered out discovered peer" @@ -290,7 +318,7 @@ impl Discv5 { .then(|| self.get_fork_id(enr).ok()) .flatten(); - trace!(target: "net::discovery::discv5", + trace!(target: "net::discv5", ?fork_id, ?enr, "discovered peer" @@ 
-300,11 +328,7 @@ impl Discv5 { } /// Tries to convert an [`Enr`](discv5::Enr) into the backwards compatible type [`NodeRecord`], - /// w.r.t. local [`IpMode`]. Tries the socket from which the ENR was sent, if socket is missing - /// from ENR. - /// - /// Note: [`discv5::Discv5`] won't initiate a session with any peer with a malformed node - /// record, that advertises a reserved IP address on a WAN network. + /// w.r.t. local [`IpMode`]. Uses source socket as udp socket. pub fn try_into_reachable( &self, enr: &discv5::Enr, @@ -312,8 +336,6 @@ impl Discv5 { ) -> Result { let id = enr_to_discv4_id(enr).ok_or(Error::IncompatibleKeyType)?; - let udp_socket = self.ip_mode().get_contactable_addr(enr).unwrap_or(socket); - // since we, on bootstrap, set tcp4 in local ENR for `IpMode::Dual`, we prefer tcp4 here // too let Some(tcp_port) = (match self.ip_mode() { @@ -323,7 +345,7 @@ impl Discv5 { return Err(Error::IpVersionMismatchRlpx(self.ip_mode())) }; - Ok(NodeRecord { address: udp_socket.ip(), tcp_port, udp_port: udp_socket.port(), id }) + Ok(NodeRecord { address: socket.ip(), tcp_port, udp_port: socket.port(), id }) } /// Applies filtering rules on an ENR. 
Returns [`Ok`](FilterOutcome::Ok) if peer should be @@ -620,7 +642,7 @@ pub async fn lookup( } #[cfg(test)] -mod tests { +mod test { use super::*; use ::enr::{CombinedKey, EnrKey}; use reth_primitives::MAINNET; @@ -674,7 +696,7 @@ mod tests { let (node_2, mut stream_2, _) = start_discovery_node(30355).await; let node_2_enr = node_2.with_discv5(|discv5| discv5.local_enr()); - trace!(target: "net::discovery::tests", + trace!(target: "net::discv5::test", node_1_node_id=format!("{:#}", node_1_enr.node_id()), node_2_node_id=format!("{:#}", node_2_enr.node_id()), "started nodes" From d9f4adc2ebb373220e681acad537a58251dfe214 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 3 May 2024 16:50:40 +0200 Subject: [PATCH 188/250] chore(deps): bump alloy, evm-inspectors (#8077) --- Cargo.lock | 136 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 32 ++++++------- 2 files changed, 84 insertions(+), 84 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3f0450ab..1828b2a2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "c-kzg", "serde", "sha2 0.10.8", @@ -161,9 +161,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ab339ca7b4ea9115f0578c941abc80a171edf8e5eadd01e6c4237b68db8083" +checksum = 
"545885d9b0b2c30fd344ae291439b4bfe59e48dd62fbc862f8503d98088967dc" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -180,11 +180,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "arbitrary", "c-kzg", "derive_more", @@ -212,10 +212,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "serde", "serde_json", ] @@ -233,9 +233,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44294729c145cf7ae65feab544b5b81fb2bb7e2fd060214842eb3989a1e9d882" +checksum = "786689872ec4e7d354810ab0dffd48bb40b838c047522eb031cbd47d15634849" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -246,7 +246,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", "serde", @@ -258,13 +258,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" 
-source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -275,9 +275,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "k256", "serde_json", @@ -289,9 +289,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c715249705afa1e32be79dabfd35e2ef0f1cc02ad2cf48c9d1e20026ee637b" +checksum = "525448f6afc1b70dd0f9d0a8145631bf2f5e434678ab23ab18409ca264cae6b3" dependencies = [ "alloy-rlp", "arbitrary", @@ -317,14 +317,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-eips 
0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -367,7 +367,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -387,14 +387,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -427,24 +427,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "serde", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -457,11 +457,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=af788af)", "serde", "serde_json", ] @@ -469,7 +469,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", "serde", @@ -489,7 +489,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-primitives", "async-trait", @@ -502,9 +502,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -518,9 +518,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef9a94a27345fb31e3fcb5f5e9f592bb4847493b07fa1e47dd9fde2222f2e28" +checksum = "89c80a2cb97e7aa48611cbb63950336f9824a174cdf670527cc6465078a26ea1" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -537,9 +537,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31fe73cd259527e24dc2dbfe64bc95e5ddfcd2b2731f670a11ff72b2be2c25b" +checksum = 
"c58894b58ac50979eeac6249661991ac40b9d541830d9a725f7714cc9ef08c23" dependencies = [ "alloy-json-abi", "const-hex", @@ -554,18 +554,18 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c8d6e74e4feeaa2bcfdecfd3da247ab53c67bd654ba1907270c32e02b142331" +checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ "winnow 0.6.7", ] [[package]] name = "alloy-sol-types" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afaffed78bfb17526375754931e045f96018aa810844b29c7aef823266dd4b4b" +checksum = "399287f68d1081ed8b1f4903c49687658b95b142207d7cb4ae2f4813915343ef" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -577,7 +577,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -595,7 +595,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=ca54552#ca54552075da02339f678e5b591877ff6c2939db" +source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2951,7 +2951,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6567,8 +6567,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "arbitrary", "bytes", @@ -6773,9 +6773,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7512,8 +7512,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7778,10 +7778,10 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7808,7 +7808,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + 
"alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7914,7 +7914,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "reth-primitives", "secp256k1", ] @@ -8048,10 +8048,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=3d2077e#3d2077ee665046c256448a8bd90d8e93ea85de56" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=d15add2#d15add2614fc359025f43bd7ad6096719580ba81" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=ca54552)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", @@ -9155,9 +9155,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70aba06097b6eda3c15f6eebab8a6339e121475bcf08bbe6758807e716c372a1" +checksum = "5aa0cefd02f532035d83cfec82647c6eb53140b0485220760e669f4bad489e36" dependencies = [ "paste", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 12b31162d..0aca2afbb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,29 +282,29 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "3d2077e" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "d15add2" } # eth alloy-chains = "0.1.15" -alloy-primitives = "0.7.1" 
-alloy-dyn-abi = "0.7.1" -alloy-sol-types = "0.7.1" +alloy-primitives = "0.7.2" +alloy-dyn-abi = "0.7.2" +alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "af788af", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "ca54552" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "ca54552" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "af788af" } +alloy-signer 
= { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } # misc auto_impl = "1" From 66f70838ae90cd6d031dfa058d7e5f6ed2bd9fad Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 May 2024 17:26:48 +0200 Subject: [PATCH 189/250] chore: move dao hardfork constants to evm-ethereum (#8078) --- .../src/eth_dao_fork.rs => ethereum/evm/src/dao_fork.rs} | 0 crates/ethereum/evm/src/execute.rs | 7 +++++-- crates/ethereum/evm/src/lib.rs | 3 +++ crates/revm/src/lib.rs | 3 --- 4 files changed, 8 insertions(+), 5 deletions(-) rename crates/{revm/src/eth_dao_fork.rs => ethereum/evm/src/dao_fork.rs} (100%) diff --git a/crates/revm/src/eth_dao_fork.rs b/crates/ethereum/evm/src/dao_fork.rs similarity index 100% rename from crates/revm/src/eth_dao_fork.rs rename to crates/ethereum/evm/src/dao_fork.rs diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index b65e7be17..ff3a4e76d 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -1,6 +1,10 @@ //! Ethereum block executor. 
-use crate::{verify::verify_receipts, EthEvmConfig}; +use crate::{ + dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, + verify::verify_receipts, + EthEvmConfig, +}; use reth_evm::{ execute::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, @@ -19,7 +23,6 @@ use reth_primitives::{ use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, }; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 88621a66a..0c8506ff7 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -18,6 +18,9 @@ use reth_revm::{Database, EvmBuilder}; pub mod execute; pub mod verify; +/// Ethereum DAO hardfork state change data. +pub mod dao_fork; + /// Ethereum-related EVM configuration. #[derive(Debug, Clone, Copy, Default)] #[non_exhaustive] diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 375b230ab..7f950afb0 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -16,9 +16,6 @@ pub mod batch; /// State changes that are not related to transactions. pub mod state_change; -/// Ethereum DAO hardfork state change data. 
-pub mod eth_dao_fork; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; From ead753db4c96f5f9f0fd2867106d88ef6d312405 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 3 May 2024 18:12:03 +0200 Subject: [PATCH 190/250] fix(cli): debug merkle script (#8067) --- bin/reth/src/commands/debug_cmd/merkle.rs | 348 ++++++++++------------ 1 file changed, 152 insertions(+), 196 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 6d895fccf..75ad1870f 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -17,28 +17,25 @@ use reth_cli_runner::CliContext; use reth_config::Config; use reth_consensus::Consensus; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; -use reth_exex::ExExManagerHandle; +use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, BlockExecutorProvider}; use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_primitives::{ - fs, - stage::{StageCheckpoint, StageId}, - BlockHashOrNumber, ChainSpec, PruneModes, +use reth_primitives::{fs, stage::StageCheckpoint, BlockHashOrNumber, ChainSpec, PruneModes}; +use reth_provider::{ + BlockNumReader, BlockWriter, BundleStateWithReceipts, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, ProviderFactory, }; -use reth_provider::{BlockWriter, ProviderFactory, StageCheckpointReader}; +use reth_revm::database::StateProviderDatabase; use reth_stages::{ - stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage, - StorageHashingStage, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, - }, + stages::{AccountHashingStage, MerkleStage, StorageHashingStage}, ExecInput, Stage, }; use reth_tasks::TaskExecutor; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; -use tracing::{debug, info, warn}; +use 
tracing::*; -/// `reth merkle-debug` command +/// `reth debug merkle` command #[derive(Debug, Parser)] pub struct Command { /// The path to the data dir for all reth files and subdirectories. @@ -140,6 +137,8 @@ impl Command { ) .await?; + let executor_provider = block_executor!(self.chain.clone()); + // Initialize the fetch client info!(target: "reth::cli", target_block_number=self.to, "Downloading tip of block range"); let fetch_client = network.fetch_client().await?; @@ -160,224 +159,181 @@ impl Command { let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); let block_range_client = FullBlockClient::new(fetch_client, consensus); - // get the execution checkpoint - let execution_checkpoint_block = - provider_rw.get_stage_checkpoint(StageId::Execution)?.unwrap_or_default().block_number; - assert!(execution_checkpoint_block < self.to, "Nothing to run"); + // get best block number + let best_block_number = provider_rw.best_block_number()?; + assert!(best_block_number < self.to, "Nothing to run"); // get the block range from the network - info!(target: "reth::cli", target_block_number=?self.to, "Downloading range of blocks"); - let block_range = block_range_client - .get_full_block_range(to_header.hash_slow(), self.to - execution_checkpoint_block) + let block_range = best_block_number..=self.to; + info!(target: "reth::cli", ?block_range, "Downloading range of blocks"); + let blocks = block_range_client + .get_full_block_range(to_header.hash_slow(), self.to - best_block_number) .await; - // recover senders - let blocks_with_senders = - block_range.into_iter().map(|block| block.try_seal_with_senders()); - - // insert the blocks - for senders_res in blocks_with_senders { - let sealed_block = match senders_res { - Ok(senders) => senders, - Err(err) => { - warn!(target: "reth::cli", "Error sealing block with senders: {err:?}. 
Skipping..."); - continue - } - }; - provider_rw.insert_block(sealed_block, None)?; - } - - // Check if any of hashing or merkle stages aren't on the same block number as - // Execution stage or have any intermediate progress. - let should_reset_stages = - [StageId::AccountHashing, StageId::StorageHashing, StageId::MerkleExecute] - .into_iter() - .map(|stage_id| provider_rw.get_stage_checkpoint(stage_id)) - .collect::, _>>()? - .into_iter() - .map(Option::unwrap_or_default) - .any(|checkpoint| { - checkpoint.block_number != execution_checkpoint_block || - checkpoint.stage_checkpoint.is_some() - }); - - let executor = block_executor!(self.chain.clone()); - let mut execution_stage = ExecutionStage::new( - executor, - ExecutionStageThresholds { - max_blocks: Some(1), - max_changes: None, - max_cumulative_gas: None, - max_duration: None, - }, - MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, - PruneModes::all(), - ExExManagerHandle::empty(), - ); + let mut td = provider_rw + .header_td_by_number(best_block_number)? 
+ .ok_or(ProviderError::TotalDifficultyNotFound(best_block_number))?; let mut account_hashing_stage = AccountHashingStage::default(); let mut storage_hashing_stage = StorageHashingStage::default(); let mut merkle_stage = MerkleStage::default_execution(); - for block in execution_checkpoint_block + 1..=self.to { - tracing::trace!(target: "reth::cli", block, "Executing block"); - let progress = - if (!should_reset_stages || block > execution_checkpoint_block + 1) && block > 0 { - Some(block - 1) - } else { - None - }; - - execution_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: block.checked_sub(1).map(StageCheckpoint::new), - }, + for block in blocks.into_iter().rev() { + let block_number = block.number; + let sealed_block = block + .try_seal_with_senders() + .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; + trace!(target: "reth::cli", block_number, "Executing block"); + + provider_rw.insert_block(sealed_block.clone(), None)?; + + td += sealed_block.difficulty; + let mut executor = executor_provider.batch_executor( + StateProviderDatabase::new(LatestStateProviderRef::new( + provider_rw.tx_ref(), + provider_rw.static_file_provider().clone(), + )), + PruneModes::none(), + ); + executor.execute_one((&sealed_block.clone().unseal(), td).into())?; + let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); + BundleStateWithReceipts::new(bundle, receipts, first_block).write_to_storage( + provider_rw.tx_ref(), + None, + OriginalValuesKnown::Yes, )?; + let checkpoint = Some(StageCheckpoint::new(block_number - 1)); + let mut account_hashing_done = false; while !account_hashing_done { - let output = account_hashing_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: progress.map(StageCheckpoint::new), - }, - )?; + let output = account_hashing_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; account_hashing_done = 
output.done; } let mut storage_hashing_done = false; while !storage_hashing_done { - let output = storage_hashing_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: progress.map(StageCheckpoint::new), - }, - )?; + let output = storage_hashing_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; storage_hashing_done = output.done; } - let incremental_result = merkle_stage.execute( - &provider_rw, - ExecInput { target: Some(block), checkpoint: progress.map(StageCheckpoint::new) }, - ); + let incremental_result = merkle_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint }); - if incremental_result.is_err() { - tracing::warn!(target: "reth::cli", block, "Incremental calculation failed, retrying from scratch"); - let incremental_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let incremental_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? - .collect::, _>>()?; - - let clean_input = ExecInput { target: Some(block), checkpoint: None }; - loop { - let clean_result = merkle_stage.execute(&provider_rw, clean_input); - assert!(clean_result.is_ok(), "Clean state root calculation failed"); - if clean_result.unwrap().done { - break - } + if incremental_result.is_ok() { + debug!(target: "reth::cli", block_number, "Successfully computed incremental root"); + continue + } + + warn!(target: "reth::cli", block_number, "Incremental calculation failed, retrying from scratch"); + let incremental_account_trie = provider_rw + .tx_ref() + .cursor_read::()? + .walk_range(..)? + .collect::, _>>()?; + let incremental_storage_trie = provider_rw + .tx_ref() + .cursor_dup_read::()? + .walk_range(..)? 
+ .collect::, _>>()?; + + let clean_input = ExecInput { target: Some(sealed_block.number), checkpoint: None }; + loop { + let clean_result = merkle_stage.execute(&provider_rw, clean_input); + assert!(clean_result.is_ok(), "Clean state root calculation failed"); + if clean_result.unwrap().done { + break } + } - let clean_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let clean_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? - .collect::, _>>()?; - - tracing::info!(target: "reth::cli", block, "Comparing incremental trie vs clean trie"); - - // Account trie - let mut incremental_account_mismatched = Vec::new(); - let mut clean_account_mismatched = Vec::new(); - let mut incremental_account_trie_iter = - incremental_account_trie.into_iter().peekable(); - let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable(); - while incremental_account_trie_iter.peek().is_some() || - clean_account_trie_iter.peek().is_some() - { - match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - similar_asserts::assert_eq!( - incremental.0, - clean.0, - "Nibbles don't match" - ); - if incremental.1 != clean.1 && - clean.0 .0.len() > self.skip_node_depth.unwrap_or_default() - { - incremental_account_mismatched.push(incremental); - clean_account_mismatched.push(clean); - } - } - (Some(incremental), None) => { - tracing::warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries"); - } - (None, Some(clean)) => { - tracing::warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries"); - } - (None, None) => { - tracing::info!(target: "reth::cli", "Exhausted all account trie entries"); + let clean_account_trie = provider_rw + .tx_ref() + .cursor_read::()? + .walk_range(..)? + .collect::, _>>()?; + let clean_storage_trie = provider_rw + .tx_ref() + .cursor_dup_read::()? 
+ .walk_range(..)? + .collect::, _>>()?; + + info!(target: "reth::cli", block_number, "Comparing incremental trie vs clean trie"); + + // Account trie + let mut incremental_account_mismatched = Vec::new(); + let mut clean_account_mismatched = Vec::new(); + let mut incremental_account_trie_iter = incremental_account_trie.into_iter().peekable(); + let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable(); + while incremental_account_trie_iter.peek().is_some() || + clean_account_trie_iter.peek().is_some() + { + match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) { + (Some(incremental), Some(clean)) => { + similar_asserts::assert_eq!(incremental.0, clean.0, "Nibbles don't match"); + if incremental.1 != clean.1 && + clean.0 .0.len() > self.skip_node_depth.unwrap_or_default() + { + incremental_account_mismatched.push(incremental); + clean_account_mismatched.push(clean); } } + (Some(incremental), None) => { + warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries"); + } + (None, Some(clean)) => { + warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries"); + } + (None, None) => { + info!(target: "reth::cli", "Exhausted all account trie entries"); + } } + } - // Stoarge trie - let mut first_mismatched_storage = None; - let mut incremental_storage_trie_iter = - incremental_storage_trie.into_iter().peekable(); - let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable(); - while incremental_storage_trie_iter.peek().is_some() || - clean_storage_trie_iter.peek().is_some() - { - match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - if incremental != clean && - clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default() - { - first_mismatched_storage = Some((incremental, clean)); - break - } - } - (Some(incremental), None) => { - tracing::warn!(target: "reth::cli", next = ?incremental, 
"Incremental storage trie has more entries"); - } - (None, Some(clean)) => { - tracing::warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries") - } - (None, None) => { - tracing::info!(target: "reth::cli", "Exhausted all storage trie entries.") + // Stoarge trie + let mut first_mismatched_storage = None; + let mut incremental_storage_trie_iter = incremental_storage_trie.into_iter().peekable(); + let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable(); + while incremental_storage_trie_iter.peek().is_some() || + clean_storage_trie_iter.peek().is_some() + { + match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) { + (Some(incremental), Some(clean)) => { + if incremental != clean && + clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default() + { + first_mismatched_storage = Some((incremental, clean)); + break } } + (Some(incremental), None) => { + warn!(target: "reth::cli", next = ?incremental, "Incremental storage trie has more entries"); + } + (None, Some(clean)) => { + warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries") + } + (None, None) => { + info!(target: "reth::cli", "Exhausted all storage trie entries.") + } } - - similar_asserts::assert_eq!( - ( - incremental_account_mismatched, - first_mismatched_storage.as_ref().map(|(incremental, _)| incremental) - ), - ( - clean_account_mismatched, - first_mismatched_storage.as_ref().map(|(_, clean)| clean) - ), - "Mismatched trie nodes" - ); } + + similar_asserts::assert_eq!( + ( + incremental_account_mismatched, + first_mismatched_storage.as_ref().map(|(incremental, _)| incremental) + ), + ( + clean_account_mismatched, + first_mismatched_storage.as_ref().map(|(_, clean)| clean) + ), + "Mismatched trie nodes" + ); } + info!(target: "reth::cli", ?block_range, "Successfully validated incremental roots"); + Ok(()) } } From f8cd8c56a297853403a6ad2e740e07677bf4abfc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 
May 2024 13:50:23 +0200 Subject: [PATCH 191/250] feat: add helper functions for batch executor (#8087) --- crates/evm/src/execute.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 7b3e58646..e7ce09e79 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -34,6 +34,27 @@ pub trait BatchExecutor { /// Executes the next block in the batch and update the state internally. fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; + /// Executes multiple inputs in the batch and update the state internally. + fn execute_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error> + where + I: IntoIterator>, + { + for input in inputs { + self.execute_one(input)?; + } + Ok(()) + } + + /// Executes the entire batch and return the final state. + fn execute_batch<'a, I>(mut self, batch: I) -> Result + where + I: IntoIterator>, + Self: Sized, + { + self.execute_many(batch)?; + Ok(self.finalize()) + } + /// Finishes the batch and return the final state. fn finalize(self) -> Self::Output; From 82e4ad9e764ac4003139d819246e5fa40b60b953 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sat, 4 May 2024 13:07:18 +0100 Subject: [PATCH 192/250] docs(book): update CLI (#8093) --- book/cli/reth/db/checksum.md | 124 +++++++++++++++++++++++++++ book/cli/reth/init-state.md | 158 +++++++++++++++++++++++++++++++++++ book/cli/reth/node.md | 15 +--- 3 files changed, 285 insertions(+), 12 deletions(-) create mode 100644 book/cli/reth/db/checksum.md create mode 100644 book/cli/reth/init-state.md diff --git a/book/cli/reth/db/checksum.md b/book/cli/reth/db/checksum.md new file mode 100644 index 000000000..6f080c74b --- /dev/null +++ b/book/cli/reth/db/checksum.md @@ -0,0 +1,124 @@ +# reth db checksum + +Calculates the content checksum of a table + +```bash +$ reth db checksum --help +Usage: reth db checksum [OPTIONS] + +Arguments: +
+ The table name + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, goerli, holesky, dev + + [default: mainnet] + + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md new file mode 100644 index 000000000..0254a43f5 --- /dev/null +++ b/book/cli/reth/init-state.md @@ -0,0 +1,158 @@ +# reth init-state + +Initialize the database from a state dump file + +```bash +$ reth init-state --help +Usage: reth init-state [OPTIONS] + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. 
+ + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, goerli, holesky, dev + + [default: mainnet] + + --state + JSONL file with state dump. + + Must contain accounts in following format, additional account fields are ignored. Can + also contain { "root": \ } as first line. + { + "balance": "\", + "nonce": \, + "code": "\", + "storage": { + "\": "\", + .. + }, + "address": "\", + } + + Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + and including the non-genesis block to init chain at. See 'import' command. + + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index d1972a608..edf0993d7 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -438,21 +438,12 @@ Debug: --debug.max-block Runs the sync only up to the specified block - --debug.print-inspector - Print opcode level traces directly to console during execution - - --debug.hook-block - Hook on a specific block during execution - - --debug.hook-transaction - Hook on a specific transaction during execution - - --debug.hook-all - Hook on every transaction in a block - --debug.skip-fcu If provided, the engine will skip `n` consecutive FCUs + --debug.skip-new-payload + If provided, the engine will skip `n` consecutive new payloads + --debug.engine-api-store The path to store engine API messages at. 
If specified, all of the intercepted engine API messages will be written to specified location From ac1d5324ec597ad6494a3025f97c5d4dd501945e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 May 2024 14:35:23 +0200 Subject: [PATCH 193/250] chore: rm unused file (#8096) --- crates/rpc/rpc-types/src/admin.rs | 123 ------------------------------ 1 file changed, 123 deletions(-) delete mode 100644 crates/rpc/rpc-types/src/admin.rs diff --git a/crates/rpc/rpc-types/src/admin.rs b/crates/rpc/rpc-types/src/admin.rs deleted file mode 100644 index aeb44fab6..000000000 --- a/crates/rpc/rpc-types/src/admin.rs +++ /dev/null @@ -1,123 +0,0 @@ -use crate::{NodeRecord, PeerId}; -use alloy_genesis::ChainConfig; -use alloy_primitives::{B256, U256}; -use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - net::{IpAddr, SocketAddr}, -}; - -/// Represents the `admin_nodeInfo` response, which can be queried for all the information -/// known about the running node at the networking granularity. -/// -/// Note: this format is not standardized. Reth follows Geth's format, -/// see: -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct NodeInfo { - /// Enode of the node in URL format. - pub enode: NodeRecord, - /// ID of the local node. - pub id: PeerId, - /// IP of the local node. - pub ip: IpAddr, - /// Address exposed for listening for the local node. - #[serde(rename = "listenAddr")] - pub listen_addr: SocketAddr, - /// Ports exposed by the node for discovery and listening. - pub ports: Ports, - /// Name of the network - pub name: String, - /// Networking protocols being run by the local node. - pub protocols: Protocols, -} - -impl NodeInfo { - /// Creates a new instance of `NodeInfo`. 
- pub fn new(enr: NodeRecord, status: NetworkStatus, config: ChainConfig) -> NodeInfo { - NodeInfo { - enode: enr, - id: enr.id, - ip: enr.address, - listen_addr: enr.tcp_addr(), - ports: Ports { discovery: enr.udp_port, listener: enr.tcp_port }, - name: status.client_version, - protocols: Protocols { - eth: EthProtocolInfo::new(status.eth_protocol_info, config), - other: Default::default(), - }, - } - } -} - -/// All supported protocols -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Protocols { - /// Info about `eth` sub-protocol - pub eth: EthProtocolInfo, - /// Placeholder for any other protocols - #[serde(flatten, default)] - pub other: BTreeMap, -} - -/// Ports exposed by the node for discovery and listening. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Ports { - /// Port exposed for node discovery. - pub discovery: u16, - /// Port exposed for listening. - pub listener: u16, -} - -/// The status of the network being ran by the local node. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct NetworkStatus { - /// The local node client version. - pub client_version: String, - /// The current ethereum protocol version - pub protocol_version: u64, - /// Information about the Ethereum Wire Protocol. - pub eth_protocol_info: EthProtocolInfo, -} - -/// Information about the Ethereum Wire Protocol (ETH) -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct EthProtocolInfo { - /// The current difficulty at the head of the chain. - pub difficulty: U256, - /// The block hash of the head of the chain. - pub head: B256, - /// Network ID in base 10. - pub network: u64, - /// Genesis block of the current chain. - pub genesis: B256, - /// Configuration of the chain. - pub config: ChainConfig, -} - -impl EthProtocolInfo { - /// Creates a new instance of `EthProtocolInfo`. 
- pub fn new(info: EthProtocolInfo, config: ChainConfig) -> EthProtocolInfo { - EthProtocolInfo { - difficulty: info.difficulty, - head: info.head, - network: info.network, - genesis: info.genesis, - config, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_parse_node_info_roundtrip() { - let sample = r#"{"enode":"enode://44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d@[::]:30303","id":"44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d","ip":"::","listenAddr":"[::]:30303","name":"reth","ports":{"discovery":30303,"listener":30303},"protocols":{"eth":{"difficulty":17334254859343145000,"genesis":"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3","head":"0xb83f73fbe6220c111136aefd27b160bf4a34085c65ba89f24246b3162257c36a","network":1, "config": {"chainId": 17000,"homesteadBlock": 0,"daoForkSupport": true,"eip150Block": 0,"eip155Block": 0,"eip158Block": 0,"byzantiumBlock": 0,"constantinopleBlock": 0,"petersburgBlock": 0,"istanbulBlock": 0,"berlinBlock": 0,"londonBlock": 0,"shanghaiTime": 1696000704,"cancunTime": 1707305664,"terminalTotalDifficulty": 0,"terminalTotalDifficultyPassed": true,"ethash": {}}}}}"#; - - let info: NodeInfo = serde_json::from_str(sample).unwrap(); - let serialized = serde_json::to_string_pretty(&info).unwrap(); - let de_serialized: NodeInfo = serde_json::from_str(&serialized).unwrap(); - assert_eq!(info, de_serialized) - } -} From 2c70e2ab3bd0266103348b462af1abe497397bee Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 May 2024 14:40:30 +0200 Subject: [PATCH 194/250] feat: rm reth-primitives dep (#8097) --- Cargo.lock | 2 +- crates/net/types/Cargo.toml | 3 ++- crates/net/types/src/lib.rs | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1828b2a2f..cafe4b83a 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -7208,8 +7208,8 @@ dependencies = [ name = "reth-network-types" version = "0.2.0-beta.6" dependencies = [ + "alloy-primitives", "enr", - "reth-primitives", "reth-rpc-types", "secp256k1", "serde_with", diff --git a/crates/net/types/Cargo.toml b/crates/net/types/Cargo.toml index 841a76dfe..9092236b1 100644 --- a/crates/net/types/Cargo.toml +++ b/crates/net/types/Cargo.toml @@ -14,7 +14,8 @@ workspace = true [dependencies] # reth reth-rpc-types.workspace = true -reth-primitives.workspace = true + +alloy-primitives.workspace = true # eth enr.workspace = true diff --git a/crates/net/types/src/lib.rs b/crates/net/types/src/lib.rs index ccd9757c9..8d75af933 100644 --- a/crates/net/types/src/lib.rs +++ b/crates/net/types/src/lib.rs @@ -126,7 +126,7 @@ impl std::fmt::Display for AnyNode { AnyNode::NodeRecord(record) => write!(f, "{record}"), AnyNode::Enr(enr) => write!(f, "{enr}"), AnyNode::PeerId(peer_id) => { - write!(f, "enode://{}", reth_primitives::hex::encode(peer_id.as_slice())) + write!(f, "enode://{}", alloy_primitives::hex::encode(peer_id.as_slice())) } } } From bff14c603f26f5d0308ad930a16b7c002fbf3511 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 May 2024 14:40:39 +0200 Subject: [PATCH 195/250] chore: rm leftover peer.rs file (#8098) --- crates/primitives/src/peer.rs | 227 ---------------------------------- 1 file changed, 227 deletions(-) delete mode 100644 crates/primitives/src/peer.rs diff --git a/crates/primitives/src/peer.rs b/crates/primitives/src/peer.rs deleted file mode 100644 index f66361f39..000000000 --- a/crates/primitives/src/peer.rs +++ /dev/null @@ -1,227 +0,0 @@ -use enr::Enr; -use reth_rpc_types::NodeRecord; -use secp256k1::{constants::UNCOMPRESSED_PUBLIC_KEY_SIZE, PublicKey, SecretKey}; -use std::{net::IpAddr, str::FromStr}; - -// Re-export PeerId for ease of use. -pub use reth_rpc_types::PeerId; - -/// This tag should be set to indicate to libsecp256k1 that the following bytes denote an -/// uncompressed pubkey. 
-/// -/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` = `0x04` -/// -/// See: -const SECP256K1_TAG_PUBKEY_UNCOMPRESSED: u8 = 4; - -/// Converts a [secp256k1::PublicKey] to a [PeerId] by stripping the -/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag and storing the rest of the slice in the [PeerId]. -#[inline] -pub fn pk2id(pk: &PublicKey) -> PeerId { - PeerId::from_slice(&pk.serialize_uncompressed()[1..]) -} - -/// Converts a [PeerId] to a [secp256k1::PublicKey] by prepending the [PeerId] bytes with the -/// SECP256K1_TAG_PUBKEY_UNCOMPRESSED tag. -#[inline] -pub fn id2pk(id: PeerId) -> Result { - // NOTE: B512 is used as a PeerId because 512 bits is enough to represent an uncompressed - // public key. - let mut s = [0u8; UNCOMPRESSED_PUBLIC_KEY_SIZE]; - s[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED; - s[1..].copy_from_slice(id.as_slice()); - PublicKey::from_slice(&s) -} - -/// A peer that can come in ENR or [NodeRecord] form. -#[derive( - Debug, Clone, Eq, PartialEq, Hash, serde_with::SerializeDisplay, serde_with::DeserializeFromStr, -)] -pub enum AnyNode { - /// An "enode:" peer with full ip - NodeRecord(NodeRecord), - /// An "enr:" - Enr(Enr), - /// An incomplete "enode" with only a peer id - PeerId(PeerId), -} - -impl AnyNode { - /// Returns the peer id of the node. - pub fn peer_id(&self) -> PeerId { - match self { - AnyNode::NodeRecord(record) => record.id, - AnyNode::Enr(enr) => pk2id(&enr.public_key()), - AnyNode::PeerId(peer_id) => *peer_id, - } - } - - /// Returns the full node record if available. 
- pub fn node_record(&self) -> Option { - match self { - AnyNode::NodeRecord(record) => Some(*record), - AnyNode::Enr(enr) => { - let node_record = NodeRecord { - address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, - tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, - udp_port: enr.udp4().or_else(|| enr.udp6())?, - id: pk2id(&enr.public_key()), - } - .into_ipv4_mapped(); - Some(node_record) - } - _ => None, - } - } -} - -impl From for AnyNode { - fn from(value: NodeRecord) -> Self { - Self::NodeRecord(value) - } -} - -impl From> for AnyNode { - fn from(value: Enr) -> Self { - Self::Enr(value) - } -} - -impl FromStr for AnyNode { - type Err = String; - - fn from_str(s: &str) -> Result { - if let Some(rem) = s.strip_prefix("enode://") { - if let Ok(record) = NodeRecord::from_str(s) { - return Ok(AnyNode::NodeRecord(record)) - } - // incomplete enode - if let Ok(peer_id) = PeerId::from_str(rem) { - return Ok(AnyNode::PeerId(peer_id)) - } - return Err(format!("invalid public key: {rem}")) - } - if s.starts_with("enr:") { - return Enr::from_str(s).map(AnyNode::Enr) - } - Err("missing 'enr:' prefix for base64-encoded record".to_string()) - } -} - -impl std::fmt::Display for AnyNode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - AnyNode::NodeRecord(record) => write!(f, "{record}"), - AnyNode::Enr(enr) => write!(f, "{enr}"), - AnyNode::PeerId(peer_id) => { - write!(f, "enode://{}", crate::hex::encode(peer_id.as_slice())) - } - } - } -} - -/// Generic wrapper with peer id -#[derive(Debug)] -pub struct WithPeerId(PeerId, pub T); - -impl From<(PeerId, T)> for WithPeerId { - fn from(value: (PeerId, T)) -> Self { - Self(value.0, value.1) - } -} - -impl WithPeerId { - /// Wraps the value with the peerid. 
- pub fn new(peer: PeerId, value: T) -> Self { - Self(peer, value) - } - - /// Get the peer id - pub fn peer_id(&self) -> PeerId { - self.0 - } - - /// Get the underlying data - pub fn data(&self) -> &T { - &self.1 - } - - /// Returns ownership of the underlying data. - pub fn into_data(self) -> T { - self.1 - } - - /// Transform the data - pub fn transform>(self) -> WithPeerId { - WithPeerId(self.0, self.1.into()) - } - - /// Split the wrapper into [PeerId] and data tuple - pub fn split(self) -> (PeerId, T) { - (self.0, self.1) - } - - /// Maps the inner value to a new value using the given function. - pub fn map U>(self, op: F) -> WithPeerId { - WithPeerId(self.0, op(self.1)) - } -} - -impl WithPeerId> { - /// returns `None` if the inner value is `None`, otherwise returns `Some(WithPeerId)`. - pub fn transpose(self) -> Option> { - self.1.map(|v| WithPeerId(self.0, v)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use secp256k1::SECP256K1; - - #[test] - fn test_node_record_parse() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: AnyNode = url.parse().unwrap(); - assert_eq!(node, AnyNode::NodeRecord(NodeRecord { - address: IpAddr::V4([10,3,58,6].into()), - tcp_port: 30303, - udp_port: 30301, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - })); - assert_eq!(node.to_string(), url) - } - - #[test] - fn test_peer_id_parse() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0"; - let node: AnyNode = url.parse().unwrap(); - assert_eq!(node, AnyNode::PeerId("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap())); - 
assert_eq!(node.to_string(), url); - - let url = "enode://"; - let err = url.parse::().unwrap_err(); - assert_eq!(err, "invalid public key: "); - } - - // - #[test] - fn test_enr_parse() { - let url = "enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8"; - let node: AnyNode = url.parse().unwrap(); - assert_eq!( - node.peer_id(), - "0xca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f" - .parse::() - .unwrap() - ); - assert_eq!(node.to_string(), url); - } - - #[test] - fn pk2id2pk() { - let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng()); - let pubkey = PublicKey::from_secret_key(SECP256K1, &prikey); - assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap()); - } -} From 4b78706ed6ea7d6a9dcaed7a09c5bd5dafef5a30 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 4 May 2024 14:55:03 +0200 Subject: [PATCH 196/250] chore(cli): fix displayed block range in merkle debug script (#8091) --- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 75ad1870f..c42cbdd4d 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -164,7 +164,7 @@ impl Command { assert!(best_block_number < self.to, "Nothing to run"); // get the block range from the network - let block_range = best_block_number..=self.to; + let block_range = best_block_number + 1..=self.to; info!(target: "reth::cli", ?block_range, "Downloading range of blocks"); let blocks = block_range_client .get_full_block_range(to_header.hash_slow(), self.to - best_block_number) From d01996103bcba6efa6b2eadcd587d444d0fcfe65 Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> Date: Sat, 
4 May 2024 22:04:34 +0530 Subject: [PATCH 197/250] replace U64 fields with primitive u64 (#8099) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-types/src/mev.rs | 133 +++++++++++++++++++++---------- crates/rpc/rpc/src/eth/bundle.rs | 2 +- 2 files changed, 92 insertions(+), 43 deletions(-) diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs index ae94375db..9126c0963 100644 --- a/crates/rpc/rpc-types/src/mev.rs +++ b/crates/rpc/rpc-types/src/mev.rs @@ -1,12 +1,11 @@ //! MEV bundle type bindings use crate::{BlockId, BlockNumberOrTag, Log}; -use alloy_primitives::{Address, Bytes, TxHash, B256, U256, U64}; +use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use serde::{ ser::{SerializeSeq, Serializer}, Deserialize, Deserializer, Serialize, }; - /// A bundle of transactions to send to the matchmaker. /// /// Note: this is for `mev_sendBundle` and not `eth_sendBundle`. @@ -35,28 +34,33 @@ pub struct SendBundleRequest { #[serde(rename_all = "camelCase")] pub struct Inclusion { /// The first block the bundle is valid for. - pub block: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub block: u64, /// The last block the bundle is valid for. - #[serde(skip_serializing_if = "Option::is_none")] - pub max_block: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub max_block: Option, } impl Inclusion { /// Creates a new inclusion with the given min block.. pub fn at_block(block: u64) -> Self { - Self { block: U64::from(block), max_block: None } + Self { block, max_block: None } } /// Returns the block number of the first block the bundle is valid for. #[inline] pub fn block_number(&self) -> u64 { - self.block.to() + self.block } /// Returns the block number of the last block the bundle is valid for. 
#[inline] pub fn max_block_number(&self) -> Option { - self.max_block.as_ref().map(|b| b.to()) + self.max_block.as_ref().map(|b| *b) } } @@ -100,8 +104,10 @@ pub struct Validity { #[serde(rename_all = "camelCase")] pub struct Refund { /// The index of the transaction in the bundle. + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub body_idx: u64, /// The minimum percent of the bundle's earnings to redistribute. + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub percent: u64, } @@ -113,6 +119,7 @@ pub struct RefundConfig { /// The address to refund. pub address: Address, /// The minimum percent of the bundle's earnings to redistribute. + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub percent: u64, } @@ -312,26 +319,42 @@ pub struct SimBundleOverrides { /// Block used for simulation state. Defaults to latest block. /// Block header data will be derived from parent block by default. /// Specify other params to override the default values. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub parent_block: Option, /// Block number used for simulation, defaults to parentBlock.number + 1 - #[serde(skip_serializing_if = "Option::is_none")] - pub block_number: Option, + #[serde(default, with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint")] + pub block_number: Option, /// Coinbase used for simulation, defaults to parentBlock.coinbase - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub coinbase: Option
, /// Timestamp used for simulation, defaults to parentBlock.timestamp + 12 - #[serde(skip_serializing_if = "Option::is_none")] - pub timestamp: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub timestamp: Option, /// Gas limit used for simulation, defaults to parentBlock.gasLimit - #[serde(skip_serializing_if = "Option::is_none")] - pub gas_limit: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub gas_limit: Option, /// Base fee used for simulation, defaults to parentBlock.baseFeePerGas - #[serde(skip_serializing_if = "Option::is_none")] - pub base_fee: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub base_fee: Option, /// Timeout in seconds, defaults to 5 - #[serde(skip_serializing_if = "Option::is_none")] - pub timeout: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub timeout: Option, } /// Response from the matchmaker after sending a simulation request. @@ -341,20 +364,25 @@ pub struct SimBundleResponse { /// Whether the simulation was successful. pub success: bool, /// Error message if the simulation failed. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option, /// The block number of the simulated block. - pub state_block: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub state_block: u64, /// The gas price of the simulated block. - pub mev_gas_price: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub mev_gas_price: u64, /// The profit of the simulated block. 
- pub profit: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub profit: u64, /// The refundable value of the simulated block. - pub refundable_value: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub refundable_value: u64, /// The gas used by the simulated block. - pub gas_used: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub gas_used: u64, /// Logs returned by mev_simBundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub logs: Option>, } @@ -363,18 +391,18 @@ pub struct SimBundleResponse { #[serde(rename_all = "camelCase")] pub struct SimBundleLogs { /// Logs for transactions in bundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub tx_logs: Option>, /// Logs for bundles in bundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub bundle_logs: Option>, } impl SendBundleRequest { /// Create a new bundle request. pub fn new( - block_num: U64, - max_block: Option, + block_num: u64, + max_block: Option, protocol_version: ProtocolVersion, bundle_body: Vec, ) -> Self { @@ -404,8 +432,12 @@ pub struct PrivateTransactionRequest { pub tx: Bytes, /// Hex-encoded number string, optional. Highest block number in which the transaction should /// be included. - #[serde(skip_serializing_if = "Option::is_none")] - pub max_block_number: Option, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub max_block_number: Option, /// Preferences for private transaction. 
#[serde(default, skip_serializing_if = "PrivateTransactionPreferences::is_empty")] pub preferences: PrivateTransactionPreferences, @@ -415,10 +447,10 @@ pub struct PrivateTransactionRequest { #[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)] pub struct PrivateTransactionPreferences { /// Requirements for the bundle to be included in the block. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub validity: Option, /// Preferences on what data should be shared about the bundle and its transactions - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub privacy: Option, } @@ -593,18 +625,27 @@ pub struct EthSendBundle { /// A list of hex-encoded signed transactions pub txs: Vec, /// hex-encoded block number for which this bundle is valid - pub block_number: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub block_number: u64, /// unix timestamp when this bundle becomes active - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub min_timestamp: Option, /// unix timestamp how long this bundle stays valid - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub max_timestamp: Option, /// list of hashes of possibly reverting txs #[serde(default, skip_serializing_if = "Vec::is_empty")] pub reverting_tx_hashes: Vec, /// UUID that can be used to cancel/replace this bundle - #[serde(rename = "replacementUuid", skip_serializing_if = "Option::is_none")] + #[serde(default, rename = "replacementUuid", skip_serializing_if = "Option::is_none")] pub replacement_uuid: Option, } @@ -625,11 +666,16 @@ pub struct EthCallBundle { /// A list of 
hex-encoded signed transactions pub txs: Vec, /// hex encoded block number for which this bundle is valid on - pub block_number: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub block_number: u64, /// Either a hex encoded number or a block tag for which state to base this simulation on pub state_block_number: BlockNumberOrTag, /// the timestamp to use for this bundle simulation, in seconds since the unix epoch - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub timestamp: Option, } @@ -654,8 +700,10 @@ pub struct EthCallBundleResponse { /// Results of individual transactions within the bundle pub results: Vec, /// The block number used as a base for this simulation + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub state_block_number: u64, /// The total gas used by all transactions in the bundle + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub total_gas_used: u64, } @@ -678,6 +726,7 @@ pub struct EthCallBundleTransactionResult { #[serde(with = "u256_numeric_string")] pub gas_price: U256, /// The amount of gas used by the transaction + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub gas_used: u64, /// The address to which the transaction is sent (optional) pub to_address: Option
, @@ -827,7 +876,7 @@ mod tests { let bundle = SendBundleRequest { protocol_version: ProtocolVersion::V0_1, - inclusion: Inclusion { block: U64::from(1), max_block: None }, + inclusion: Inclusion { block: 1, max_block: None }, bundle_body, validity, privacy, diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index c2d56df31..0523141eb 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -52,7 +52,7 @@ where EthBundleError::EmptyBundleTransactions.to_string(), )) } - if block_number.to::() == 0 { + if block_number == 0 { return Err(EthApiError::InvalidParams( EthBundleError::BundleMissingBlockNumber.to_string(), )) From 101e99f57fec3734bb5cfc9a0cb3cd8c429e341a Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Sun, 5 May 2024 12:31:12 +0200 Subject: [PATCH 198/250] ci: remove check-cfg job (#8106) --- .github/workflows/lint.yml | 21 +++------------------ crates/net/eth-wire/tests/fuzz_roundtrip.rs | 2 +- crates/net/network/tests/it/main.rs | 1 - 3 files changed, 4 insertions(+), 20 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b939e159d..4f3632875 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -27,8 +27,7 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: - cargo clippy --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }} asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" + - run: cargo clippy --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }} asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" env: RUSTFLAGS: -D warnings @@ -95,9 +94,7 @@ jobs: env: # Keep in sync with ./book.yml:jobs.build # This should only add `-D warnings` - RUSTDOCFLAGS: - --cfg docsrs --show-type-layout --generate-link-to-definition 
--enable-index-page - -Zunstable-options -D warnings + RUSTDOCFLAGS: --cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page -Zunstable-options -D warnings fmt: name: fmt @@ -126,23 +123,11 @@ jobs: with: cmd: jq empty etc/grafana/dashboards/overview.json - check-cfg: - name: check-cfg - runs-on: ubuntu-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@nightly - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - run: cargo +nightly -Zcheck-cfg c - lint-success: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana, check-cfg] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/crates/net/eth-wire/tests/fuzz_roundtrip.rs b/crates/net/eth-wire/tests/fuzz_roundtrip.rs index 9bd75e3f3..1fc5ea0bf 100644 --- a/crates/net/eth-wire/tests/fuzz_roundtrip.rs +++ b/crates/net/eth-wire/tests/fuzz_roundtrip.rs @@ -48,7 +48,7 @@ macro_rules! 
fuzz_type_and_name { }; } -#[cfg(any(test, feature = "bench"))] +#[cfg(test)] pub mod fuzz_rlp { use crate::roundtrip_encoding; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; diff --git a/crates/net/network/tests/it/main.rs b/crates/net/network/tests/it/main.rs index 2bed287d6..1b4494abd 100644 --- a/crates/net/network/tests/it/main.rs +++ b/crates/net/network/tests/it/main.rs @@ -4,7 +4,6 @@ mod multiplex; mod requests; mod session; mod startup; -#[cfg(not(feature = "optimism"))] mod txgossip; fn main() {} From fa59ec8078b6c388b75b4f971013be451b8ab128 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 5 May 2024 11:23:14 +0000 Subject: [PATCH 199/250] chore(deps): weekly `cargo update` (#8104) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 112 ++++++++++++++++++++++++++++------------------------- 1 file changed, 59 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cafe4b83a..68d67a451 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -148,7 +148,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -156,7 +156,6 @@ dependencies = [ "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", "c-kzg", "serde", - "sha2 0.10.8", ] [[package]] @@ -199,7 +198,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -207,6 +206,7 @@ 
dependencies = [ "c-kzg", "once_cell", "serde", + "sha2 0.10.8", ] [[package]] @@ -223,7 +223,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -409,7 +409,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -479,7 +479,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#c058e32d2025a2fd60b2617554ade7afeaca9c47" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "serde", @@ -648,47 +648,48 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" 
[[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -1015,9 +1016,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backon" @@ -1735,9 +1736,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "comfy-table" @@ -3109,7 +3110,7 @@ dependencies = [ [[package]] name = "foundry-blob-explorers" version = "0.1.0" -source = "git+https://github.com/foundry-rs/block-explorers#cd824d3fc53feca59ca6a2fc76f191fbb3ac2011" +source = 
"git+https://github.com/foundry-rs/block-explorers#adcb750e8d8e57f7decafca433118bf7836ffd55" dependencies = [ "alloy-chains", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -4243,6 +4244,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -4662,9 +4669,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" +checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" dependencies = [ "either", "fnv", @@ -4673,6 +4680,7 @@ dependencies = [ "instant", "libp2p-core", "libp2p-identity", + "lru", "multistream-select", "once_cell", "rand 0.8.5", @@ -5298,9 +5306,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -5321,9 +5329,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -5571,9 +5579,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -8470,9 +8478,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" +checksum = "76ad2bbb0ae5100a07b7a6f2ed7ab5fd0045551a4c507989b7a620046ea3efdc" dependencies = [ "sdd", ] @@ -8611,9 +8619,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.199" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" +checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" dependencies = [ "serde_derive", ] @@ -8629,9 +8637,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.199" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" +checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" dependencies = [ "proc-macro2", "quote", @@ -9238,9 +9246,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-fuzz" -version = "5.0.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b139530208017f9d5a113784ed09cf1b8b22dee95eb99d51d89af1a3c2d6594e" +checksum = "f8224048089fb4c76b0569e76e00bf6cdaf06790eb5290e9582a0c485094e0a8" dependencies = [ "serde", "test-fuzz-internal", @@ -9250,9 +9258,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "5.0.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"16e78ed8148311b6a02578dee5fd77600bf8805b77b2cb8382a9435348080985" +checksum = "43cd6c1a291bd5f843f5dfb813c2fd7ad8e38de06722a14eeb54636c983485cc" dependencies = [ "bincode", "cargo_metadata", @@ -9261,9 +9269,9 @@ dependencies = [ [[package]] name = "test-fuzz-macro" -version = "5.0.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f9bc8c69f276df24e4d1c082e52ea057544495916c4aa0708b82e47f55f364" +checksum = "fffbe4466c9f941baa7dd177856ebda245d08b2aa2e3b6890d6dd8c54d6ceebe" dependencies = [ "darling 0.20.8", "itertools 0.12.1", @@ -9276,9 +9284,9 @@ dependencies = [ [[package]] name = "test-fuzz-runtime" -version = "5.0.0" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b657ccc932fde05dbac5c460bffa40809937adaa5558863fe8174526e1b3bc9" +checksum = "8fc507e8ea4887c091e1a57b65458c57b3a8fce1b6ed53afee77a174cfe41c17" dependencies = [ "hex", "num-traits", @@ -9512,9 +9520,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -9523,7 +9531,6 @@ dependencies = [ "pin-project-lite", "slab", "tokio", - "tracing", ] [[package]] @@ -9876,12 +9883,11 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.91" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad7eb6319ebadebca3dacf1f85a93bc54b73dd81b9036795f73de7ddfe27d5a" +checksum = "2a0e5d82932dfbf36df38de5df0cfe846d13430b3ae3fdc48b2e91ed692c8df7" dependencies = [ "glob", - "once_cell", "serde", "serde_derive", "serde_json", @@ -10543,18 +10549,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = 
"0.7.32" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393" dependencies = [ "proc-macro2", "quote", From 199503531c4e66cba702844a5a8224620ee0e877 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 May 2024 20:26:29 +0200 Subject: [PATCH 200/250] chore: bump alloy 0bb7604 (#8107) --- Cargo.lock | 108 +++++++++++------------ Cargo.toml | 26 +++--- crates/e2e-test-utils/src/engine_api.rs | 13 ++- crates/e2e-test-utils/src/node.rs | 2 +- crates/rpc/rpc-types-compat/src/block.rs | 1 + examples/exex/rollup/src/execution.rs | 4 +- 6 files changed, 83 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 68d67a451..f054a15ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,16 +133,14 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "c-kzg", "serde", - "sha2 0.10.8", - "thiserror", ] [[package]] @@ -179,11 +177,11 @@ dependencies = [ [[package]] name = 
"alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "arbitrary", "c-kzg", "derive_more", @@ -193,6 +191,7 @@ dependencies = [ "proptest", "proptest-derive", "serde", + "sha2 0.10.8", ] [[package]] @@ -212,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "serde", "serde_json", ] @@ -246,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "serde", @@ -258,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 
0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -275,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "k256", "serde_json", @@ -317,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -367,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -387,14 +386,14 @@ dependencies = [ [[package]] name = 
"alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -427,24 +426,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "serde", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -457,11 +456,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "serde", "serde_json", ] @@ -469,7 +468,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "serde", @@ -489,7 +488,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "async-trait", @@ -502,9 +501,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -577,7 +576,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -595,13 +594,14 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=af788af#af788afe934d4c54ec1fcb6bb4b16ce385f913ab" +source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-json-rpc", "alloy-transport", "reqwest 0.12.4", "serde_json", "tower", + "tracing", "url", ] @@ -2952,7 +2952,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6575,8 +6575,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "arbitrary", "bytes", @@ -6781,9 +6781,9 @@ dependencies = [ name = 
"reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7520,8 +7520,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7786,10 +7786,10 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7816,7 +7816,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7922,7 +7922,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-genesis 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "reth-primitives", "secp256k1", ] @@ -8056,10 +8056,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=d15add2#d15add2614fc359025f43bd7ad6096719580ba81" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=42f01d0#42f01d08219f1b4fcb409b82377ec999919002de" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=af788af)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index 0aca2afbb..55ff51720 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "d15add2" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "42f01d0" } # eth alloy-chains = "0.1.15" @@ -291,20 +291,20 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "af788af", default-features = false, features = [ 
+alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "af788af" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "af788af" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "0bb7604" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } # misc auto_impl = "1" diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index fecd9b8b7..13b735aea 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,5 +1,8 @@ use crate::traits::PayloadEnvelopeExt; -use jsonrpsee::http_client::{transport::HttpBackend, HttpClient}; +use jsonrpsee::{ + core::client::ClientT, + http_client::{transport::HttpBackend, HttpClient}, +}; use reth::{ 
api::{EngineTypes, PayloadBuilderAttributes}, providers::CanonStateNotificationStream, @@ -29,6 +32,14 @@ impl EngineApiTestContext { Ok(EngineApiClient::::get_payload_v3(&self.engine_api_client, payload_id).await?) } + /// Retrieves a v3 payload from the engine api as serde value + pub async fn get_payload_v3_value( + &self, + payload_id: PayloadId, + ) -> eyre::Result { + Ok(self.engine_api_client.request("engine_getPayloadV3", (payload_id,)).await?) + } + /// Submits a payload to the engine api pub async fn submit_payload( &self, diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index b2ccf899e..668af6034 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -112,7 +112,7 @@ where // wait for the payload builder to have finished building self.payload.wait_for_built_payload(eth_attr.payload_id()).await; // trigger resolve payload via engine api - self.engine_api.get_payload_v3(eth_attr.payload_id()).await?; + self.engine_api.get_payload_v3_value(eth_attr.payload_id()).await?; // ensure we're also receiving the built payload as event Ok((self.payload.expect_built_payload().await?, eth_attr)) } diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index b342f8a30..1c2a44ebb 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -141,6 +141,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) excess_blob_gas: excess_blob_gas.map(u128::from), parent_beacon_block_root, total_difficulty: None, + requests_root: None, } } diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs index 98a8e378c..f7a98382e 100644 --- a/examples/exex/rollup/src/execution.rs +++ b/examples/exex/rollup/src/execution.rs @@ -1,4 +1,4 @@ -use alloy_consensus::{SidecarCoder, SimpleCoder}; +use alloy_consensus::{Blob, SidecarCoder, SimpleCoder}; use alloy_rlp::Decodable as 
_; use eyre::OptionExt; use reth::transaction_pool::TransactionPool; @@ -157,7 +157,7 @@ async fn decode_transactions( .map(|(blob, commitment)| (blob, kzg_to_versioned_hash((*commitment).into()))) // Filter only blobs that are present in the block data .filter(|(_, hash)| blob_hashes.contains(hash)) - .map(|(blob, _)| blob) + .map(|(blob, _)| Blob::from(*blob)) .collect::>(); if blobs.len() != blob_hashes.len() { eyre::bail!("some blobs not found") From 8f8b29b3ceba2bae7e6c40fa8e3b3f3873fcdee0 Mon Sep 17 00:00:00 2001 From: jn Date: Mon, 6 May 2024 03:14:54 -0700 Subject: [PATCH 201/250] refactor: replace futures_util pin and tokio_pin with std pin (#8109) --- Cargo.lock | 1 - crates/cli/runner/Cargo.toml | 1 - crates/cli/runner/src/lib.rs | 12 +++++++----- crates/net/eth-wire/src/multiplex.rs | 6 +++--- crates/net/network/src/listener.rs | 8 +++++--- crates/net/network/src/manager.rs | 6 +++--- crates/rpc/ipc/src/server/mod.rs | 18 ++++++++++-------- crates/tasks/src/lib.rs | 8 ++++---- 8 files changed, 32 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f054a15ba..4b3305491 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6565,7 +6565,6 @@ dependencies = [ name = "reth-cli-runner" version = "0.2.0-beta.6" dependencies = [ - "futures", "reth-tasks", "tokio", "tracing", diff --git a/crates/cli/runner/Cargo.toml b/crates/cli/runner/Cargo.toml index 697621cee..3182b738b 100644 --- a/crates/cli/runner/Cargo.toml +++ b/crates/cli/runner/Cargo.toml @@ -15,7 +15,6 @@ workspace = true reth-tasks.workspace = true # async -futures.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal"] } # misc diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 31a1356c6..94536d0cb 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -10,9 +10,8 @@ //! Entrypoint for running commands. 
-use futures::pin_mut; use reth_tasks::{TaskExecutor, TaskManager}; -use std::future::Future; +use std::{future::Future, pin::pin}; use tracing::{debug, error, trace}; /// Executes CLI commands. @@ -141,7 +140,7 @@ where E: Send + Sync + From + 'static, { { - pin_mut!(fut); + let fut = pin!(fut); tokio::select! { err = tasks => { return Err(err.into()) @@ -166,7 +165,9 @@ where { let mut stream = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?; let sigterm = stream.recv(); - pin_mut!(sigterm, ctrl_c, fut); + let sigterm = pin!(sigterm); + let ctrl_c = pin!(ctrl_c); + let fut = pin!(fut); tokio::select! { _ = ctrl_c => { @@ -181,7 +182,8 @@ where #[cfg(not(unix))] { - pin_mut!(ctrl_c, fut); + let ctrl_c = pin!(ctrl_c); + let fut = pin!(fut); tokio::select! { _ = ctrl_c => { diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 82eccd5c8..04b7cda37 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -12,7 +12,7 @@ use std::{ fmt, future::Future, io, - pin::Pin, + pin::{pin, Pin}, task::{ready, Context, Poll}, }; @@ -23,7 +23,7 @@ use crate::{ CanDisconnect, DisconnectReason, EthStream, P2PStream, Status, UnauthedEthStream, }; use bytes::{Bytes, BytesMut}; -use futures::{pin_mut, Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; +use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; use reth_primitives::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -159,7 +159,7 @@ impl RlpxProtocolMultiplexer { }; let f = handshake(proxy); - pin_mut!(f); + let mut f = pin!(f); // this polls the connection and the primary stream concurrently until the handshake is // complete diff --git a/crates/net/network/src/listener.rs b/crates/net/network/src/listener.rs index 1575b3933..4cc219655 100644 --- a/crates/net/network/src/listener.rs +++ b/crates/net/network/src/listener.rs @@ 
-104,8 +104,10 @@ impl Stream for TcpListenerStream { #[cfg(test)] mod tests { use super::*; - use futures::pin_mut; - use std::net::{Ipv4Addr, SocketAddrV4}; + use std::{ + net::{Ipv4Addr, SocketAddrV4}, + pin::pin, + }; use tokio::macros::support::poll_fn; #[tokio::test(flavor = "multi_thread")] @@ -117,7 +119,7 @@ mod tests { let local_addr = listener.local_address(); tokio::task::spawn(async move { - pin_mut!(listener); + let mut listener = pin!(listener); match poll_fn(|cx| listener.as_mut().poll(cx)).await { ListenerEvent::Incoming { .. } => {} _ => { diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 0d2a33408..d516625c6 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -35,7 +35,7 @@ use crate::{ transactions::NetworkTransactionEvent, FetchClient, NetworkBuilder, }; -use futures::{pin_mut, Future, StreamExt}; +use futures::{Future, StreamExt}; use parking_lot::Mutex; use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, @@ -53,7 +53,7 @@ use reth_tokio_util::EventListeners; use secp256k1::SecretKey; use std::{ net::SocketAddr, - pin::Pin, + pin::{pin, Pin}, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, @@ -902,7 +902,7 @@ where shutdown_hook: impl FnOnce(&mut Self), ) { let network = self; - pin_mut!(network, shutdown); + let mut network = pin!(network); let mut graceful_guard = None; tokio::select! 
{ diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index ed0eadb4a..046087454 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -19,7 +19,7 @@ use jsonrpsee::{ use std::{ future::Future, io, - pin::Pin, + pin::{pin, Pin}, sync::Arc, task::{Context, Poll}, }; @@ -155,7 +155,7 @@ where let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize); let stopped = stop_handle.clone().shutdown(); - tokio::pin!(stopped); + let mut stopped = pin!(stopped); let (drop_on_completion, mut process_connection_awaiter) = mpsc::channel::<()>(1); @@ -223,7 +223,7 @@ where S: Future + Unpin, { let accept = listener.accept(); - tokio::pin!(accept); + let accept = pin!(accept); match futures_util::future::select(accept, stopped).await { Either::Left((res, stop)) => match res { @@ -506,11 +506,11 @@ async fn to_ipc_service( pending_calls: Default::default(), items: Default::default(), }; - tokio::pin!(conn, rx_item); - let stopped = stop_handle.shutdown(); - tokio::pin!(stopped); + let mut conn = pin!(conn); + let mut rx_item = pin!(rx_item); + let mut stopped = pin!(stopped); loop { tokio::select! 
{ @@ -522,7 +522,7 @@ async fn to_ipc_service( conn.push_back(item); } } - _ = &mut stopped=> { + _ = &mut stopped => { // shutdown break } @@ -844,6 +844,7 @@ mod tests { PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; use reth_tracing::init_test_tracing; + use std::pin::pin; use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; @@ -854,7 +855,8 @@ mod tests { let sink = pending.accept().await.unwrap(); let closed = sink.closed(); - futures::pin_mut!(closed, stream); + let mut closed = pin!(closed); + let mut stream = pin!(stream); loop { match select(closed, stream.next()).await { diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 0f93e5bc5..3e526a344 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -19,12 +19,12 @@ use crate::{ use dyn_clone::DynClone; use futures_util::{ future::{select, BoxFuture}, - pin_mut, Future, FutureExt, TryFutureExt, + Future, FutureExt, TryFutureExt, }; use std::{ any::Any, fmt::{Display, Formatter}, - pin::Pin, + pin::{pin, Pin}, sync::{ atomic::{AtomicUsize, Ordering}, Arc, @@ -334,7 +334,7 @@ impl TaskExecutor { async move { // Create an instance of IncCounterOnDrop with the counter to increment let _inc_counter_on_drop = IncCounterOnDrop::new(finished_regular_tasks_metrics); - pin_mut!(fut); + let fut = pin!(fut); let _ = select(on_shutdown, fut).await; } } @@ -409,7 +409,7 @@ impl TaskExecutor { let task = async move { // Create an instance of IncCounterOnDrop with the counter to increment let _inc_counter_on_drop = IncCounterOnDrop::new(finished_critical_tasks_metrics); - pin_mut!(task); + let task = pin!(task); let _ = select(on_shutdown, task).await; }; From f83a872dd6045a27fc06a95c06363139570edecb Mon Sep 17 00:00:00 2001 From: alpharush <0xalpharush@protonmail.com> Date: Mon, 6 May 2024 05:16:27 -0500 Subject: [PATCH 202/250] feat: improve exex examples' validation (#8116) --- examples/exex/op-bridge/src/main.rs | 21 ++++++++++++++++++--- 
examples/exex/rollup/src/main.rs | 12 ++++++++---- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs index 0f48b0a5f..02c87ba15 100644 --- a/examples/exex/op-bridge/src/main.rs +++ b/examples/exex/op-bridge/src/main.rs @@ -3,7 +3,7 @@ use futures::Future; use reth_exex::{ExExContext, ExExEvent}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; -use reth_primitives::{Log, SealedBlockWithSenders, TransactionSigned}; +use reth_primitives::{address, Address, Log, SealedBlockWithSenders, TransactionSigned}; use reth_provider::Chain; use reth_tracing::tracing::info; use rusqlite::Connection; @@ -11,6 +11,15 @@ use rusqlite::Connection; sol!(L1StandardBridge, "l1_standard_bridge_abi.json"); use crate::L1StandardBridge::{ETHBridgeFinalized, ETHBridgeInitiated, L1StandardBridgeEvents}; +const OP_BRIDGES: [Address; 6] = [ + address!("3154Cf16ccdb4C6d922629664174b904d80F2C35"), + address!("3a05E5d33d7Ab3864D53aaEc93c8301C1Fa49115"), + address!("697402166Fbf2F22E970df8a6486Ef171dbfc524"), + address!("99C9fc46f92E8a1c0deC1b1747d010903E884bE1"), + address!("735aDBbE72226BD52e818E7181953f42E3b0FF21"), + address!("3B95bC951EE0f553ba487327278cAc44f29715E5"), +]; + /// Initializes the ExEx. /// /// Opens up a SQLite database and creates the tables (if they don't exist). 
@@ -213,8 +222,14 @@ fn decode_chain_into_events( .zip(receipts.iter().flatten()) .map(move |(tx, receipt)| (block, tx, receipt)) }) - // Get all logs - .flat_map(|(block, tx, receipt)| receipt.logs.iter().map(move |log| (block, tx, log))) + // Get all logs from expected bridge contracts + .flat_map(|(block, tx, receipt)| { + receipt + .logs + .iter() + .filter(|log| OP_BRIDGES.contains(&log.address)) + .map(move |log| (block, tx, log)) + }) // Decode and filter bridge events .filter_map(|(block, tx, log)| { L1StandardBridgeEvents::decode_raw_log(log.topics(), &log.data.data, true) diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs index f3e7f00bc..f1af0c1ae 100644 --- a/examples/exex/rollup/src/main.rs +++ b/examples/exex/rollup/src/main.rs @@ -243,10 +243,14 @@ fn decode_chain_into_rollup_events( .zip(receipts.iter().flatten()) .map(move |(tx, receipt)| (block, tx, receipt)) }) - // Filter only transactions to the rollup contract - .filter(|(_, tx, _)| tx.to() == Some(ROLLUP_CONTRACT_ADDRESS)) - // Get all logs - .flat_map(|(block, tx, receipt)| receipt.logs.iter().map(move |log| (block, tx, log))) + // Get all logs from rollup contract + .flat_map(|(block, tx, receipt)| { + receipt + .logs + .iter() + .filter(|log| log.address == ROLLUP_CONTRACT_ADDRESS) + .map(move |log| (block, tx, log)) + }) // Decode and filter rollup events .filter_map(|(block, tx, log)| { RollupContractEvents::decode_raw_log(log.topics(), &log.data.data, true) From 68920b830f21ebbd3b28797813876a4ba173bce7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 May 2024 13:08:20 +0200 Subject: [PATCH 203/250] feat: add exex for in memory state (#8108) --- Cargo.lock | 12 ++++++ Cargo.toml | 4 +- examples/README.md | 11 ++--- examples/exex/in-memory-state/Cargo.toml | 15 +++++++ examples/exex/in-memory-state/src/main.rs | 49 +++++++++++++++++++++++ 5 files changed, 83 insertions(+), 8 deletions(-) create mode 100644 examples/exex/in-memory-state/Cargo.toml 
create mode 100644 examples/exex/in-memory-state/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 4b3305491..1dc869a93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2912,6 +2912,18 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "exex-in-memory-state" +version = "0.0.0" +dependencies = [ + "eyre", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-ethereum", + "reth-tracing", +] + [[package]] name = "exex-minimal" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 55ff51720..7eed06b7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,9 +87,7 @@ members = [ "examples/txpool-tracing/", "examples/polygon-p2p/", "examples/custom-inspector/", - "examples/exex/minimal/", - "examples/exex/op-bridge/", - "examples/exex/rollup/", + "examples/exex/*", "examples/db-access", "testing/ef-tests/", "testing/testing-utils", diff --git a/examples/README.md b/examples/README.md index 0885aa294..4c135f880 100644 --- a/examples/README.md +++ b/examples/README.md @@ -23,11 +23,12 @@ to make a PR! 
## ExEx -| Example | Description | -| ---------------------------------- | --------------------------------------------------------------------------------- | -| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | -| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | -| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | +| Example | Description | +|-------------------------------------------|-----------------------------------------------------------------------------------| +| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | +| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | +| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | +| [In Memory State](./exex/in-memory-state) | Illustrates an ExEx that tracks the plain state in memory | ## RPC diff --git a/examples/exex/in-memory-state/Cargo.toml b/examples/exex/in-memory-state/Cargo.toml new file mode 100644 index 000000000..c7fd34ea5 --- /dev/null +++ b/examples/exex/in-memory-state/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "exex-in-memory-state" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-exex.workspace = true +reth-node-api.workspace = true +reth-node-ethereum.workspace = true +reth-tracing.workspace = true + +eyre.workspace = true diff --git a/examples/exex/in-memory-state/src/main.rs b/examples/exex/in-memory-state/src/main.rs new file mode 100644 index 000000000..451bb9c42 --- /dev/null +++ b/examples/exex/in-memory-state/src/main.rs @@ -0,0 +1,49 @@ +#![warn(unused_crate_dependencies)] + +use reth::providers::BundleStateWithReceipts; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_api::FullNodeComponents; +use 
reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; + +/// An ExEx that keeps track of the entire state in memory +async fn track_state(mut ctx: ExExContext) -> eyre::Result<()> { + // keeps the entire plain state of the chain in memory + let mut state = BundleStateWithReceipts::default(); + + while let Some(notification) = ctx.notifications.recv().await { + match ¬ification { + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); + } + ExExNotification::ChainReorged { old, new } => { + // revert to block before the reorg + state.revert_to(new.first().number - 1); + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + state.revert_to(old.first().number - 1); + info!(reverted_chain = ?old.range(), "Received revert"); + } + }; + + if let Some(committed_chain) = notification.committed_chain() { + // extend the state with the new chain + state.extend(committed_chain.state().clone()); + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + } + Ok(()) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("in-memory-state", |ctx| async move { Ok(track_state(ctx)) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} From 00a02f5b5c4c6d36d9d6f38fd445311a7d9da0b5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 6 May 2024 13:14:57 +0200 Subject: [PATCH 204/250] chore: remote `try_` prefix from block to payload conversion methods (#8117) --- crates/consensus/beacon/src/engine/mod.rs | 14 +++++++------- crates/ethereum/engine-primitives/src/payload.rs | 4 ++-- crates/payload/optimism/src/payload.rs | 4 ++-- crates/rpc/rpc-builder/tests/it/auth.rs | 4 ++-- crates/rpc/rpc-engine-api/tests/it/payload.rs | 8 ++++---- crates/rpc/rpc-types-compat/src/engine/mod.rs | 2 +- 
.../rpc/rpc-types-compat/src/engine/payload.rs | 16 ++++++++-------- 7 files changed, 26 insertions(+), 26 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 3e12c5f8e..d3c5bfe09 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1906,7 +1906,7 @@ mod tests { use reth_primitives::{stage::StageCheckpoint, ChainSpecBuilder, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; - use reth_rpc_types_compat::engine::payload::try_block_to_payload_v1; + use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::oneshot::error::TryRecvError; @@ -1968,7 +1968,7 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(try_block_to_payload_v1(SealedBlock::default()), None).await; + let _ = env.send_new_payload(block_to_payload_v1(SealedBlock::default()), None).await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); @@ -2425,7 +2425,7 @@ mod tests { // Send new payload let res = env .send_new_payload( - try_block_to_payload_v1(random_block(&mut rng, 0, None, None, Some(0))), + block_to_payload_v1(random_block(&mut rng, 0, None, None, Some(0))), None, ) .await; @@ -2436,7 +2436,7 @@ mod tests { // Send new payload let res = env .send_new_payload( - try_block_to_payload_v1(random_block(&mut rng, 1, None, None, Some(0))), + block_to_payload_v1(random_block(&mut rng, 1, None, None, Some(0))), None, ) .await; @@ -2492,7 +2492,7 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(try_block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), 
None) .await .unwrap(); @@ -2606,7 +2606,7 @@ mod tests { // Send new payload let parent = rng.gen(); let block = random_block(&mut rng, 2, Some(parent), None, Some(0)); - let res = env.send_new_payload(try_block_to_payload_v1(block), None).await; + let res = env.send_new_payload(block_to_payload_v1(block), None).await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2673,7 +2673,7 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(try_block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) .await .unwrap(); diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 264355ac2..55a97c96d 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -11,7 +11,7 @@ use reth_rpc_types::engine::{ PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, try_block_to_payload_v1, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use std::convert::Infallible; @@ -91,7 +91,7 @@ impl<'a> BuiltPayload for &'a EthBuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: EthBuiltPayload) -> Self { - try_block_to_payload_v1(value.block) + block_to_payload_v1(value.block) } } diff --git a/crates/payload/optimism/src/payload.rs b/crates/payload/optimism/src/payload.rs index b90d05d5f..9cd47ef42 100644 --- a/crates/payload/optimism/src/payload.rs +++ b/crates/payload/optimism/src/payload.rs @@ -16,7 +16,7 @@ use reth_rpc_types::engine::{ OptimismPayloadAttributes, PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - 
block_to_payload_v3, convert_block_to_payload_field_v2, try_block_to_payload_v1, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use revm::primitives::HandlerCfg; use std::sync::Arc; @@ -230,7 +230,7 @@ impl<'a> BuiltPayload for &'a OptimismBuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: OptimismBuiltPayload) -> Self { - try_block_to_payload_v1(value.block) + block_to_payload_v1(value.block) } } diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 4b95d11ed..b5416bf67 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -8,7 +8,7 @@ use reth_rpc::JwtSecret; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_types::engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use reth_rpc_types_compat::engine::payload::{ - convert_block_to_payload_input_v2, try_block_to_payload_v1, + block_to_payload_v1, convert_block_to_payload_input_v2, }; #[allow(unused_must_use)] async fn test_basic_engine_calls(client: &C) @@ -17,7 +17,7 @@ where C: EngineApiClient, { let block = Block::default().seal_slow(); - EngineApiClient::new_payload_v1(client, try_block_to_payload_v1(block.clone())).await; + EngineApiClient::new_payload_v1(client, block_to_payload_v1(block.clone())).await; EngineApiClient::new_payload_v2(client, convert_block_to_payload_input_v2(block)).await; EngineApiClient::fork_choice_updated_v1(client, ForkchoiceState::default(), None).await; EngineApiClient::get_payload_v1(client, PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await; diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 0979af400..22219584c 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -13,8 +13,8 @@ use reth_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, 
PayloadError, }; use reth_rpc_types_compat::engine::payload::{ - convert_to_payload_body_v1, try_block_to_payload, try_block_to_payload_v1, - try_into_sealed_block, try_payload_v1_to_block, + block_to_payload, block_to_payload_v1, convert_to_payload_body_v1, try_into_sealed_block, + try_payload_v1_to_block, }; fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { @@ -23,7 +23,7 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi // Recalculate roots transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body); transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.ommers); - try_block_to_payload(SealedBlock { + block_to_payload(SealedBlock { header: transformed.header.seal_slow(), body: transformed.body, ommers: transformed.ommers, @@ -89,7 +89,7 @@ fn payload_validation() { ); // Invalid encoded transactions - let mut payload_with_invalid_txs: ExecutionPayloadV1 = try_block_to_payload_v1(block.clone()); + let mut payload_with_invalid_txs: ExecutionPayloadV1 = block_to_payload_v1(block.clone()); payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| { *tx = Bytes::new().into(); diff --git a/crates/rpc/rpc-types-compat/src/engine/mod.rs b/crates/rpc/rpc-types-compat/src/engine/mod.rs index e14b83500..aa7456250 100644 --- a/crates/rpc/rpc-types-compat/src/engine/mod.rs +++ b/crates/rpc/rpc-types-compat/src/engine/mod.rs @@ -1,3 +1,3 @@ //! 
Standalone functions for engine specific rpc type conversions pub mod payload; -pub use payload::{try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block}; +pub use payload::{block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block}; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index fdacab4e6..3ab9a74b9 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -84,21 +84,21 @@ pub fn try_payload_v3_to_block(payload: ExecutionPayloadV3) -> Result ExecutionPayload { +pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 ExecutionPayload::V3(block_to_payload_v3(value)) } else if value.withdrawals.is_some() { // block with withdrawals: V2 - ExecutionPayload::V2(try_block_to_payload_v2(value)) + ExecutionPayload::V2(block_to_payload_v2(value)) } else { // otherwise V1 - ExecutionPayload::V1(try_block_to_payload_v1(value)) + ExecutionPayload::V1(block_to_payload_v1(value)) } } /// Converts [SealedBlock] to [ExecutionPayloadV1] -pub fn try_block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { +pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, @@ -119,7 +119,7 @@ pub fn try_block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { } /// Converts [SealedBlock] to [ExecutionPayloadV2] -pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { +pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { let transactions = value.raw_transactions(); ExecutionPayloadV2 { @@ -176,9 +176,9 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 { // if 
there are withdrawals, return V2 if value.withdrawals.is_some() { - ExecutionPayloadFieldV2::V2(try_block_to_payload_v2(value)) + ExecutionPayloadFieldV2::V2(block_to_payload_v2(value)) } else { - ExecutionPayloadFieldV2::V1(try_block_to_payload_v1(value)) + ExecutionPayloadFieldV2::V1(block_to_payload_v1(value)) } } @@ -205,7 +205,7 @@ pub fn convert_payload_input_v2_to_payload(value: ExecutionPayloadInputV2) -> Ex pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayloadInputV2 { ExecutionPayloadInputV2 { withdrawals: value.withdrawals.clone().map(Withdrawals::into_inner), - execution_payload: try_block_to_payload_v1(value), + execution_payload: block_to_payload_v1(value), } } From 1d9894fbee15bcfb7b3f7b630440db2397f1a674 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 6 May 2024 14:25:20 +0200 Subject: [PATCH 205/250] chore: bump alloy 17c5650 (#8118) --- Cargo.lock | 104 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 26 +++++++------- 2 files changed, 65 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1dc869a93..1a984e04d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "c-kzg", "serde", ] @@ -177,11 +177,11 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "arbitrary", "c-kzg", "derive_more", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "serde", "serde_json", ] @@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "k256", "serde_json", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -426,24 +426,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "serde", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -456,11 +456,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "serde", "serde_json", ] @@ -468,7 +468,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", "serde", @@ -488,7 +488,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-primitives", "async-trait", @@ -501,9 +501,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -576,7 +576,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -594,7 +594,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=0bb7604#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -2964,7 +2964,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -6586,8 +6586,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "arbitrary", "bytes", @@ -6792,9 +6792,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 
0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7531,8 +7531,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7797,10 +7797,10 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7827,7 +7827,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7933,7 +7933,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "reth-primitives", "secp256k1", ] @@ -8067,10 +8067,10 @@ dependencies = [ 
[[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=42f01d0#42f01d08219f1b4fcb409b82377ec999919002de" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=089efac#089efacf72e7583630841b7027c46a3cb2f9c28b" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=0bb7604)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", diff --git a/Cargo.toml b/Cargo.toml index 7eed06b7f..e48db1712 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,7 +280,7 @@ reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "42f01d0" } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "089efac" } # eth alloy-chains = "0.1.15" @@ -289,20 +289,20 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-rpc-types-anvil = { git = 
"https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "0bb7604" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "0bb7604" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "17c5650" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } # misc auto_impl = "1" From 7fd091536f6f46d084fca460697035d558504981 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Mon, 6 May 2024 19:40:55 +0700 Subject: [PATCH 206/250] fix(rpc_server): remember to set ipc config for auth server (#8120) Co-authored-by: dzung --- crates/node-core/src/args/rpc_server.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node-core/src/args/rpc_server.rs index 1a60aa31a..e19a88737 100644 --- a/crates/node-core/src/args/rpc_server.rs +++ 
b/crates/node-core/src/args/rpc_server.rs @@ -475,7 +475,9 @@ impl RethRpcConfig for RpcServerArgs { let mut builder = AuthServerConfig::builder(jwt_secret).socket_addr(address); if self.auth_ipc { - builder = builder.ipc_endpoint(self.auth_ipc_path.clone()); + builder = builder + .ipc_endpoint(self.auth_ipc_path.clone()) + .with_ipc_config(self.ipc_server_builder()); } Ok(builder.build()) } From 5e778317fb6ef0809fbd399a583ac6fb9befad2b Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Mon, 6 May 2024 18:37:25 +0530 Subject: [PATCH 207/250] convert ```OptimismBlockExecution``` error variant into a general purpose error variant (#8100) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 4 +- crates/ethereum/evm/src/execute.rs | 5 +- .../interfaces/src/blockchain_tree/error.rs | 3 +- crates/interfaces/src/error.rs | 2 +- crates/interfaces/src/executor.rs | 43 +++---- crates/optimism/evm/Cargo.toml | 1 + crates/optimism/evm/src/error.rs | 29 +++++ crates/optimism/evm/src/execute.rs | 26 ++--- crates/optimism/evm/src/l1.rs | 109 +++++++----------- crates/optimism/evm/src/lib.rs | 2 + crates/storage/provider/src/chain.rs | 2 +- 12 files changed, 111 insertions(+), 116 deletions(-) create mode 100644 crates/optimism/evm/src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 1a984e04d..4fa6fc73d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7007,6 +7007,7 @@ dependencies = [ "reth-revm", "revm", "revm-primitives", + "thiserror", "tracing", ] diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 5346eafbd..64d311549 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -2162,7 +2162,7 @@ mod tests { .assert(&tree); // unwind canonical - assert_eq!(tree.unwind(block1.number), Ok(())); + assert!(tree.unwind(block1.number).is_ok()); // Trie state: // b2 b2a (pending 
block) // / / @@ -2226,7 +2226,7 @@ mod tests { .assert(&tree); // update canonical block to b2, this would make b2a be removed - assert_eq!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12), Ok(())); + assert!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12).is_ok()); assert_eq!( tree.is_block_known(block2.num_hash()).unwrap(), diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index ff3a4e76d..db361f35d 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -523,9 +523,10 @@ mod tests { .expect_err( "Executing cancun block without parent beacon block root field should fail", ); + assert_eq!( - err, - BlockExecutionError::Validation(BlockValidationError::MissingParentBeaconBlockRoot) + err.as_validation().unwrap().clone(), + BlockValidationError::MissingParentBeaconBlockRoot ); // fix header, set a gas limit diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index b63698576..b805c6ee8 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -293,8 +293,7 @@ impl InsertBlockErrorKind { BlockExecutionError::CanonicalCommit { .. } | BlockExecutionError::AppendChainDoesntConnect { .. } | BlockExecutionError::UnavailableForTest => false, - #[cfg(feature = "optimism")] - BlockExecutionError::OptimismBlockExecution(_) => false, + BlockExecutionError::Other(_) => false, } } InsertBlockErrorKind::Tree(err) => { diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index df307ae09..38498c312 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -16,7 +16,7 @@ pub type RethResult = Result; /// This enum encapsulates various error types that can occur during blockchain interactions. /// /// It allows for structured error handling based on the nature of the encountered issue. 
-#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq)] +#[derive(Debug, thiserror::Error)] pub enum RethError { /// Error encountered during block execution. #[error(transparent)] diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index e8f7f40b1..04b9832f0 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -80,7 +80,7 @@ pub enum BlockValidationError { } /// BlockExecutor Errors -#[derive(Error, Debug, Clone, PartialEq, Eq)] +#[derive(Error, Debug)] pub enum BlockExecutionError { /// Validation error, transparently wrapping `BlockValidationError` #[error(transparent)] @@ -118,35 +118,28 @@ pub enum BlockExecutionError { /// Error when fetching latest block state. #[error(transparent)] LatestBlock(#[from] ProviderError), - /// Optimism Block Executor Errors - #[cfg(feature = "optimism")] #[error(transparent)] - OptimismBlockExecution(#[from] OptimismBlockExecutionError), -} - -/// Optimism Block Executor Errors -#[cfg(feature = "optimism")] -#[derive(Error, Debug, Clone, PartialEq, Eq)] -pub enum OptimismBlockExecutionError { - /// Error when trying to parse L1 block info - #[error("could not get L1 block info from L2 block: {message:?}")] - L1BlockInfoError { - /// The inner error message - message: String, - }, - /// Thrown when force deploy of create2deployer code fails. - #[error("failed to force create2deployer account code")] - ForceCreate2DeployerFail, - /// Thrown when a blob transaction is included in a sequencer's block. - #[error("blob transaction included in sequencer block")] - BlobTransactionRejected, - /// Thrown when a database account could not be loaded. - #[error("failed to load account {0}")] - AccountLoadFailed(reth_primitives::Address), + Other(Box), } impl BlockExecutionError { + /// Create a new `BlockExecutionError::Other` variant. 
+ pub fn other(error: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + Self::Other(Box::new(error)) + } + + /// Returns the inner `BlockValidationError` if the error is a validation error. + pub const fn as_validation(&self) -> Option<&BlockValidationError> { + match self { + Self::Validation(err) => Some(err), + _ => None, + } + } + /// Returns `true` if the error is fatal. /// /// This represents an unrecoverable database related error. diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 8e5afc5ef..4e5fd2f19 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -23,6 +23,7 @@ revm.workspace = true revm-primitives.workspace = true # misc +thiserror.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs new file mode 100644 index 000000000..de923d44c --- /dev/null +++ b/crates/optimism/evm/src/error.rs @@ -0,0 +1,29 @@ +//! Error types for the Optimism EVM module. + +use reth_interfaces::executor::BlockExecutionError; + +/// Optimism Block Executor Errors +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum OptimismBlockExecutionError { + /// Error when trying to parse L1 block info + #[error("could not get L1 block info from L2 block: {message:?}")] + L1BlockInfoError { + /// The inner error message + message: String, + }, + /// Thrown when force deploy of create2deployer code fails. + #[error("failed to force create2deployer account code")] + ForceCreate2DeployerFail, + /// Thrown when a blob transaction is included in a sequencer's block. + #[error("blob transaction included in sequencer block")] + BlobTransactionRejected, + /// Thrown when a database account could not be loaded. 
+ #[error("failed to load account {0}")] + AccountLoadFailed(reth_primitives::Address), +} + +impl From for BlockExecutionError { + fn from(err: OptimismBlockExecutionError) -> Self { + BlockExecutionError::other(err) + } +} diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index d19d441a8..c6bb5c7cf 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,6 +1,9 @@ //! Optimism block executor. -use crate::{l1::ensure_create2_deployer, verify::verify_receipts, OptimismEvmConfig}; +use crate::{ + l1::ensure_create2_deployer, verify::verify_receipts, OptimismBlockExecutionError, + OptimismEvmConfig, +}; use reth_evm::{ execute::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, @@ -9,7 +12,7 @@ use reth_evm::{ ConfigureEvm, ConfigureEvmEnv, }; use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError, OptimismBlockExecutionError}, + executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, }; use reth_primitives::{ @@ -141,13 +144,8 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. - ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()).map_err( - |_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::ForceCreate2DeployerFail, - ) - }, - )?; + ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) + .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.len()); @@ -167,9 +165,7 @@ where // An optimism block should never contain blob transactions. 
if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::BlobTransactionRejected, - )); + return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()); } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -184,11 +180,7 @@ where .map(|acc| acc.account_info().unwrap_or_default()) }) .transpose() - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::AccountLoadFailed(*sender), - ) - })?; + .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; let mut buf = Vec::with_capacity(transaction.length_without_header()); transaction.encode_enveloped(&mut buf); diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 896cbc36a..7b605448f 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,9 +1,7 @@ //! Optimism-specific implementation and utilities for the executor -use reth_interfaces::{ - executor::{self as reth_executor, BlockExecutionError}, - RethError, -}; +use crate::OptimismBlockExecutionError; +use reth_interfaces::{executor::BlockExecutionError, RethError}; use reth_primitives::{address, b256, hex, Address, Block, Bytes, ChainSpec, Hardfork, B256, U256}; use revm::{ primitives::{Bytecode, HashMap, SpecId}, @@ -29,20 +27,19 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. 
-pub fn extract_l1_info(block: &Block) -> Result { +pub fn extract_l1_info(block: &Block) -> Result { let l1_info_tx_data = block .body .first() - .ok_or(reth_executor::OptimismBlockExecutionError::L1BlockInfoError { + .ok_or(OptimismBlockExecutionError::L1BlockInfoError { message: "could not find l1 block info tx in the L2 block".to_string(), }) .map(|tx| tx.input())?; if l1_info_tx_data.len() < 4 { - return Err(reth_executor::OptimismBlockExecutionError::L1BlockInfoError { + return Err(OptimismBlockExecutionError::L1BlockInfoError { message: "invalid l1 block info transaction calldata in the L2 block".to_string(), - } - .into()) + }) } // If the first 4 bytes of the calldata are the L1BlockInfoEcotone selector, then we parse the @@ -56,7 +53,7 @@ pub fn extract_l1_info(block: &Block) -> Result Result { +pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result { // The setL1BlockValues tx calldata must be exactly 260 bytes long, considering that // we already removed the first 4 bytes (the function selector). 
Detailed breakdown: // 32 bytes for the block number @@ -68,33 +65,25 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result { +pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { if data.len() != 160 { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "unexpected l1 block info tx calldata length found".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "unexpected l1 block info tx calldata length found".to_string(), + }) } let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[8..12]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee scalar".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee scalar".to_string(), + }, )?; let l1_base_fee_scalar = U256::try_from_be_slice(&data[12..16]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 base fee scalar".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 base fee scalar".to_string(), + }, )?; let l1_base_fee = U256::try_from_be_slice(&data[32..64]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee".to_string(), + }, )?; let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not 
convert l1 blob base fee".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee".to_string(), + }, )?; let mut l1block = L1BlockInfo::default(); @@ -216,11 +195,10 @@ impl RethL1BlockInfo for L1BlockInfo { } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "Optimism hardforks are not active".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "Optimism hardforks are not active".to_string(), + } + .into()) }; Ok(self.calculate_tx_l1_cost(input, spec_id)) } @@ -236,11 +214,10 @@ impl RethL1BlockInfo for L1BlockInfo { } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "Optimism hardforks are not active".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "Optimism hardforks are not active".to_string(), + } + .into()) }; Ok(self.data_gas(input, spec_id)) } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index c51265983..748eeab7b 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -22,7 +22,9 @@ pub use execute::*; pub mod l1; pub use l1::*; +mod error; pub mod verify; +pub use error::OptimismBlockExecutionError; /// Optimism-related EVM configuration. 
#[derive(Debug, Default, Clone, Copy)] diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index a596d93ea..9b9c66d4b 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -498,7 +498,7 @@ mod tests { let chain2 = Chain { blocks: BTreeMap::from([(3, block3), (4, block4)]), ..Default::default() }; - assert_eq!(chain1.append_chain(chain2.clone()), Ok(())); + assert!(chain1.append_chain(chain2.clone()).is_ok()); // chain1 got changed so this will fail assert!(chain1.append_chain(chain2).is_err()); From b77473cfab52cd6aae06834df59dbc0cd8edee7f Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 6 May 2024 16:54:28 +0200 Subject: [PATCH 208/250] fix: add checks for hardfork-specific fields to ensure_well_formed_payload (#7993) --- crates/payload/validator/src/lib.rs | 45 ++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index c3b25aef9..6b95b0425 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -38,6 +38,12 @@ impl ExecutionPayloadValidator { self.chain_spec().is_cancun_active_at_timestamp(timestamp) } + /// Returns true if the Shanghai hardfork is active at the given timestamp. + #[inline] + fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { + self.chain_spec().is_shanghai_active_at_timestamp(timestamp) + } + /// Cancun specific checks for EIP-4844 blob transactions. 
/// /// Ensures that the number of blob versioned hashes matches the number hashes included in the @@ -114,11 +120,42 @@ impl ExecutionPayloadValidator { }) } - let cancun_active = self.is_cancun_active_at_timestamp(sealed_block.timestamp); + if self.is_cancun_active_at_timestamp(sealed_block.timestamp) { + if sealed_block.header.blob_gas_used.is_none() { + // cancun active but blob gas used not present + return Err(PayloadError::PostCancunBlockWithoutBlobGasUsed) + } + if sealed_block.header.excess_blob_gas.is_none() { + // cancun active but excess blob gas not present + return Err(PayloadError::PostCancunBlockWithoutExcessBlobGas) + } + if cancun_fields.as_ref().is_none() { + // cancun active but cancun fields not present + return Err(PayloadError::PostCancunWithoutCancunFields) + } + } else { + if sealed_block.has_blob_transactions() { + // cancun not active but blob transactions present + return Err(PayloadError::PreCancunBlockWithBlobTransactions) + } + if sealed_block.header.blob_gas_used.is_some() { + // cancun not active but blob gas used present + return Err(PayloadError::PreCancunBlockWithBlobGasUsed) + } + if sealed_block.header.excess_blob_gas.is_some() { + // cancun not active but excess blob gas present + return Err(PayloadError::PreCancunBlockWithExcessBlobGas) + } + if cancun_fields.as_ref().is_some() { + // cancun not active but cancun fields present + return Err(PayloadError::PreCancunWithCancunFields) + } + } - if !cancun_active && sealed_block.has_blob_transactions() { - // cancun not active but blob transactions present - return Err(PayloadError::PreCancunBlockWithBlobTransactions) + let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp); + if !shanghai_active && sealed_block.withdrawals.is_some() { + // shanghai not active but withdrawals present + return Err(PayloadError::PreShanghaiBlockWithWitdrawals); } // EIP-4844 checks From 614e1bccd0cfdd723bee1fcfd4577c9af23f8c1a Mon Sep 17 00:00:00 2001 From: Matthias Seitz 
Date: Mon, 6 May 2024 17:00:35 +0200 Subject: [PATCH 209/250] chore: move NodeRecord type (#8121) --- Cargo.lock | 12 +- Cargo.toml | 1 + crates/net/types/Cargo.toml | 15 +- crates/net/types/src/lib.rs | 8 +- crates/net/types/src/node_record.rs | 362 ++++++++++++++++++++++++++++ crates/primitives/Cargo.toml | 1 + crates/primitives/src/net.rs | 2 +- crates/rpc/rpc-types/Cargo.toml | 15 +- crates/rpc/rpc-types/src/lib.rs | 1 + crates/rpc/rpc-types/src/net.rs | 357 --------------------------- crates/rpc/rpc-types/src/peer.rs | 5 - 11 files changed, 391 insertions(+), 388 deletions(-) create mode 100644 crates/net/types/src/node_record.rs diff --git a/Cargo.lock b/Cargo.lock index 4fa6fc73d..14a5dd752 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7229,10 +7229,14 @@ name = "reth-network-types" version = "0.2.0-beta.6" dependencies = [ "alloy-primitives", + "alloy-rlp", "enr", - "reth-rpc-types", + "rand 0.8.5", "secp256k1", + "serde_json", "serde_with", + "thiserror", + "url", ] [[package]] @@ -7558,6 +7562,7 @@ dependencies = [ "rayon", "reth-codecs", "reth-ethereum-forks", + "reth-network-types", "reth-rpc-types", "revm", "revm-primitives", @@ -7798,29 +7803,24 @@ dependencies = [ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-primitives", - "alloy-rlp", "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", "arbitrary", "bytes", - "enr", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", "proptest", "proptest-derive", "rand 0.8.5", - "secp256k1", "serde", "serde_json", "serde_with", "similar-asserts", "thiserror", - "url", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e48db1712..dc693e94f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -338,6 +338,7 @@ smallvec = "1" dyn-clone = "1.0.17" sha2 = { version = "0.10", default-features = false } paste = "1.0" 
+url = "2.3" # proc-macros proc-macro2 = "1.0" diff --git a/crates/net/types/Cargo.toml b/crates/net/types/Cargo.toml index 9092236b1..9be9a2f3a 100644 --- a/crates/net/types/Cargo.toml +++ b/crates/net/types/Cargo.toml @@ -12,17 +12,22 @@ description = "Network types and utils" workspace = true [dependencies] -# reth -reth-rpc-types.workspace = true - -alloy-primitives.workspace = true # eth +alloy-primitives = { workspace = true, features = ["rlp"] } +alloy-rlp = { workspace = true, features = ["derive"] } enr.workspace = true # crypto -secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } +secp256k1.workspace = true # misc serde_with.workspace = true +thiserror.workspace = true +url.workspace = true +[dev-dependencies] +alloy-primitives = { workspace = true, features = ["rand"] } +rand.workspace = true +secp256k1 = { workspace = true, features = ["rand"] } +serde_json.workspace = true diff --git a/crates/net/types/src/lib.rs b/crates/net/types/src/lib.rs index 8d75af933..e4b9f28a4 100644 --- a/crates/net/types/src/lib.rs +++ b/crates/net/types/src/lib.rs @@ -11,12 +11,18 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_primitives::B512; use secp256k1::{constants::UNCOMPRESSED_PUBLIC_KEY_SIZE, PublicKey, SecretKey}; use std::{net::IpAddr, str::FromStr}; // Re-export PeerId for ease of use. pub use enr::Enr; -pub use reth_rpc_types::{NodeRecord, PeerId}; + +/// Alias for a peer identifier +pub type PeerId = B512; + +pub mod node_record; +pub use node_record::{NodeRecord, NodeRecordParseError}; /// This tag should be set to indicate to libsecp256k1 that the following bytes denote an /// uncompressed pubkey. diff --git a/crates/net/types/src/node_record.rs b/crates/net/types/src/node_record.rs new file mode 100644 index 000000000..5a6706201 --- /dev/null +++ b/crates/net/types/src/node_record.rs @@ -0,0 +1,362 @@ +//! Commonly used NodeRecord type for peers. 
+ +use std::{ + fmt, + fmt::Write, + net::{IpAddr, Ipv4Addr, SocketAddr}, + num::ParseIntError, + str::FromStr, +}; + +use crate::{pk2id, PeerId}; +use alloy_rlp::{RlpDecodable, RlpEncodable}; +use enr::Enr; +use secp256k1::{SecretKey, SECP256K1}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; + +/// Represents a ENR in discovery. +/// +/// Note: this is only an excerpt of the [`NodeRecord`] data structure. +#[derive( + Clone, + Copy, + Debug, + Eq, + PartialEq, + Hash, + SerializeDisplay, + DeserializeFromStr, + RlpEncodable, + RlpDecodable, +)] +pub struct NodeRecord { + /// The Address of a node. + pub address: IpAddr, + /// TCP port of the port that accepts connections. + pub tcp_port: u16, + /// UDP discovery port. + pub udp_port: u16, + /// Public key of the discovery service + pub id: PeerId, +} + +impl NodeRecord { + /// Derive the [`NodeRecord`] from the secret key and addr + pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self { + let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk); + let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); + Self::new(addr, id) + } + + /// Converts the `address` into an [`Ipv4Addr`] if the `address` is a mapped + /// [Ipv6Addr](std::net::Ipv6Addr). + /// + /// Returns `true` if the address was converted. + /// + /// See also [std::net::Ipv6Addr::to_ipv4_mapped] + pub fn convert_ipv4_mapped(&mut self) -> bool { + // convert IPv4 mapped IPv6 address + if let IpAddr::V6(v6) = self.address { + if let Some(v4) = v6.to_ipv4_mapped() { + self.address = v4.into(); + return true + } + } + false + } + + /// Same as [Self::convert_ipv4_mapped] but consumes the type + pub fn into_ipv4_mapped(mut self) -> Self { + self.convert_ipv4_mapped(); + self + } + + /// Creates a new record from a socket addr and peer id. 
+ #[allow(dead_code)] + pub fn new(addr: SocketAddr, id: PeerId) -> Self { + Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id } + } + + /// The TCP socket address of this node + #[must_use] + pub fn tcp_addr(&self) -> SocketAddr { + SocketAddr::new(self.address, self.tcp_port) + } + + /// The UDP socket address of this node + #[must_use] + pub fn udp_addr(&self) -> SocketAddr { + SocketAddr::new(self.address, self.udp_port) + } +} + +impl fmt::Display for NodeRecord { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("enode://")?; + alloy_primitives::hex::encode(self.id.as_slice()).fmt(f)?; + f.write_char('@')?; + match self.address { + IpAddr::V4(ip) => { + ip.fmt(f)?; + } + IpAddr::V6(ip) => { + // encapsulate with brackets + f.write_char('[')?; + ip.fmt(f)?; + f.write_char(']')?; + } + } + f.write_char(':')?; + self.tcp_port.fmt(f)?; + if self.tcp_port != self.udp_port { + f.write_str("?discport=")?; + self.udp_port.fmt(f)?; + } + + Ok(()) + } +} + +/// Possible error types when parsing a [`NodeRecord`] +#[derive(Debug, thiserror::Error)] +pub enum NodeRecordParseError { + /// Invalid url + #[error("Failed to parse url: {0}")] + InvalidUrl(String), + /// Invalid id + #[error("Failed to parse id")] + InvalidId(String), + /// Invalid discport + #[error("Failed to discport query: {0}")] + Discport(ParseIntError), +} + +impl FromStr for NodeRecord { + type Err = NodeRecordParseError; + + fn from_str(s: &str) -> Result { + use url::{Host, Url}; + + let url = Url::parse(s).map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?; + + let address = match url.host() { + Some(Host::Ipv4(ip)) => IpAddr::V4(ip), + Some(Host::Ipv6(ip)) => IpAddr::V6(ip), + Some(Host::Domain(ip)) => IpAddr::V4( + Ipv4Addr::from_str(ip) + .map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?, + ), + _ => return Err(NodeRecordParseError::InvalidUrl(format!("invalid host: {url:?}"))), + }; + let port = url + .port() + 
.ok_or_else(|| NodeRecordParseError::InvalidUrl("no port specified".to_string()))?; + + let udp_port = if let Some(discovery_port) = url + .query_pairs() + .find_map(|(maybe_disc, port)| (maybe_disc.as_ref() == "discport").then_some(port)) + { + discovery_port.parse::().map_err(NodeRecordParseError::Discport)? + } else { + port + }; + + let id = url + .username() + .parse::() + .map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?; + + Ok(Self { address, id, tcp_port: port, udp_port }) + } +} + +impl TryFrom<&Enr> for NodeRecord { + type Error = NodeRecordParseError; + + fn try_from(enr: &Enr) -> Result { + let Some(address) = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from)) + else { + return Err(NodeRecordParseError::InvalidUrl("ip missing".to_string())) + }; + + let Some(udp_port) = enr.udp4().or_else(|| enr.udp6()) else { + return Err(NodeRecordParseError::InvalidUrl("udp port missing".to_string())) + }; + + let Some(tcp_port) = enr.tcp4().or_else(|| enr.tcp6()) else { + return Err(NodeRecordParseError::InvalidUrl("tcp port missing".to_string())) + }; + + let id = pk2id(&enr.public_key()); + + Ok(NodeRecord { address, tcp_port, udp_port, id }.into_ipv4_mapped()) + } +} + +#[cfg(test)] +mod tests { + use std::net::Ipv6Addr; + + use alloy_rlp::Decodable; + use rand::{thread_rng, Rng, RngCore}; + + use super::*; + + #[test] + fn test_mapped_ipv6() { + let mut rng = thread_rng(); + + let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); + let v6 = v4.to_ipv6_mapped(); + + let record = NodeRecord { + address: v6.into(), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + assert!(record.clone().convert_ipv4_mapped()); + assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); + } + + #[test] + fn test_mapped_ipv4() { + let mut rng = thread_rng(); + let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); + + let record = NodeRecord { + address: v4.into(), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + 
assert!(!record.clone().convert_ipv4_mapped()); + assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); + } + + #[test] + fn test_noderecord_codec_ipv4() { + let mut rng = thread_rng(); + for _ in 0..100 { + let mut ip = [0u8; 4]; + rng.fill_bytes(&mut ip); + let record = NodeRecord { + address: IpAddr::V4(ip.into()), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); + assert_eq!(record, decoded); + } + } + + #[test] + fn test_noderecord_codec_ipv6() { + let mut rng = thread_rng(); + for _ in 0..100 { + let mut ip = [0u8; 16]; + rng.fill_bytes(&mut ip); + let record = NodeRecord { + address: IpAddr::V6(ip.into()), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); + assert_eq!(record, decoded); + } + } + + #[test] + fn test_url_parse() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(node, NodeRecord { + address: IpAddr::V4([10,3,58,6].into()), + tcp_port: 30303, + udp_port: 30301, + id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), + }) + } + + #[test] + fn test_node_display() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(url, &format!("{node}")); + } + + #[test] + fn test_node_display_discport() { + let url = 
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(url, &format!("{node}")); + } + + #[test] + fn test_node_serialize() { + let cases = vec![ + // IPv4 + ( + NodeRecord { + address: IpAddr::V4([10, 3, 58, 6].into()), + tcp_port: 30303u16, + udp_port: 30301u16, + id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), + }, + "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"" + ), + // IPv6 + ( + NodeRecord { + address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), + tcp_port: 52150u16, + udp_port: 52151u16, + id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), + }, + "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", + ) + ]; + + for (node, expected) in cases { + let ser = serde_json::to_string::(&node).expect("couldn't serialize"); + assert_eq!(ser, expected); + } + } + + #[test] + fn test_node_deserialize() { + let cases = vec![ + // IPv4 + ( + "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"", + NodeRecord { + address: IpAddr::V4([10, 3, 58, 6].into()), + tcp_port: 30303u16, + udp_port: 30301u16, + id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), 
+ } + ), + // IPv6 + ( + "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", + NodeRecord { + address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), + tcp_port: 52150u16, + udp_port: 52151u16, + id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), + } + ), + ]; + + for (url, expected) in cases { + let node: NodeRecord = serde_json::from_str(url).expect("couldn't deserialize"); + assert_eq!(node, expected); + } + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index deaee2300..675c7167f 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-codecs.workspace = true reth-ethereum-forks.workspace = true +reth-network-types.workspace = true reth-rpc-types.workspace = true revm.workspace = true revm-primitives = { workspace = true, features = ["serde"] } diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 068e47e5b..778e2658b 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -1,4 +1,4 @@ -pub use reth_rpc_types::{NodeRecord, NodeRecordParseError}; +pub use reth_network_types::{NodeRecord, NodeRecordParseError}; // Ethereum bootnodes come from // OP bootnodes come from diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 5f87e9482..1426b50f8 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -12,8 +12,8 @@ description = "Reth RPC types" workspace = true [dependencies] + # ethereum -alloy-rlp = { workspace = true, features = ["arrayvec", "derive"] } alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde"] } alloy-rpc-types = { workspace = true, features = 
["jsonrpsee-types"] } alloy-rpc-types-anvil.workspace = true @@ -21,8 +21,6 @@ alloy-rpc-types-trace.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } ethereum_ssz_derive = { version = "0.5", optional = true } ethereum_ssz = { version = "0.5", optional = true } -alloy-genesis.workspace = true -enr = { workspace = true, features = ["serde", "rust-secp256k1"] } # misc thiserror.workspace = true @@ -30,19 +28,10 @@ serde = { workspace = true, features = ["derive"] } serde_with = "3.3" serde_json.workspace = true jsonrpsee-types = { workspace = true, optional = true } -url = "2.3" -# necessary so we don't hit a "undeclared 'std'": -# https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 -secp256k1.workspace = true - -# arbitrary -arbitrary = { workspace = true, features = ["derive"], optional = true } -proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } [features] default = ["jsonrpsee-types"] -arbitrary = ["dep:arbitrary", "dep:proptest-derive", "dep:proptest", "alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] +arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] ssz = ["dep:ethereum_ssz" ,"dep:ethereum_ssz_derive", "alloy-primitives/ssz", "alloy-rpc-types/ssz", "alloy-rpc-types-engine/ssz"] diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 0adcab0f3..01ed0f911 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -8,6 +8,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] pub mod beacon; mod eth; diff --git a/crates/rpc/rpc-types/src/net.rs b/crates/rpc/rpc-types/src/net.rs index d72d00fa5..b434bcbf8 100644 --- a/crates/rpc/rpc-types/src/net.rs +++ b/crates/rpc/rpc-types/src/net.rs @@ -1,19 +1,5 @@ -use crate::{pk_to_id, 
PeerId}; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use alloy_rpc_types::admin::EthProtocolInfo; -use enr::Enr; -use secp256k1::{SecretKey, SECP256K1}; use serde::{Deserialize, Serialize}; -use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::{ - fmt, - fmt::Write, - net::{IpAddr, Ipv4Addr, SocketAddr}, - num::ParseIntError, - str::FromStr, -}; -use thiserror::Error; -use url::{Host, Url}; /// The status of the network being ran by the local node. #[derive(Clone, Debug, Serialize, Deserialize)] @@ -25,346 +11,3 @@ pub struct NetworkStatus { /// Information about the Ethereum Wire Protocol. pub eth_protocol_info: EthProtocolInfo, } - -/// Represents a ENR in discovery. -/// -/// Note: this is only an excerpt of the [`NodeRecord`] data structure. -#[derive( - Clone, - Copy, - Debug, - Eq, - PartialEq, - Hash, - SerializeDisplay, - DeserializeFromStr, - RlpEncodable, - RlpDecodable, -)] -pub struct NodeRecord { - /// The Address of a node. - pub address: IpAddr, - /// TCP port of the port that accepts connections. - pub tcp_port: u16, - /// UDP discovery port. - pub udp_port: u16, - /// Public key of the discovery service - pub id: PeerId, -} - -impl NodeRecord { - /// Derive the [`NodeRecord`] from the secret key and addr - pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self { - let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk); - let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); - Self::new(addr, id) - } - - /// Converts the `address` into an [`Ipv4Addr`] if the `address` is a mapped - /// [Ipv6Addr](std::net::Ipv6Addr). - /// - /// Returns `true` if the address was converted. 
- /// - /// See also [std::net::Ipv6Addr::to_ipv4_mapped] - pub fn convert_ipv4_mapped(&mut self) -> bool { - // convert IPv4 mapped IPv6 address - if let IpAddr::V6(v6) = self.address { - if let Some(v4) = v6.to_ipv4_mapped() { - self.address = v4.into(); - return true - } - } - false - } - - /// Same as [Self::convert_ipv4_mapped] but consumes the type - pub fn into_ipv4_mapped(mut self) -> Self { - self.convert_ipv4_mapped(); - self - } - - /// Creates a new record from a socket addr and peer id. - #[allow(dead_code)] - pub fn new(addr: SocketAddr, id: PeerId) -> Self { - Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id } - } - - /// The TCP socket address of this node - #[must_use] - pub fn tcp_addr(&self) -> SocketAddr { - SocketAddr::new(self.address, self.tcp_port) - } - - /// The UDP socket address of this node - #[must_use] - pub fn udp_addr(&self) -> SocketAddr { - SocketAddr::new(self.address, self.udp_port) - } -} - -impl fmt::Display for NodeRecord { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("enode://")?; - alloy_primitives::hex::encode(self.id.as_slice()).fmt(f)?; - f.write_char('@')?; - match self.address { - IpAddr::V4(ip) => { - ip.fmt(f)?; - } - IpAddr::V6(ip) => { - // encapsulate with brackets - f.write_char('[')?; - ip.fmt(f)?; - f.write_char(']')?; - } - } - f.write_char(':')?; - self.tcp_port.fmt(f)?; - if self.tcp_port != self.udp_port { - f.write_str("?discport=")?; - self.udp_port.fmt(f)?; - } - - Ok(()) - } -} - -/// Possible error types when parsing a [`NodeRecord`] -#[derive(Debug, Error)] -pub enum NodeRecordParseError { - /// Invalid url - #[error("Failed to parse url: {0}")] - InvalidUrl(String), - /// Invalid id - #[error("Failed to parse id")] - InvalidId(String), - /// Invalid discport - #[error("Failed to discport query: {0}")] - Discport(ParseIntError), -} - -impl FromStr for NodeRecord { - type Err = NodeRecordParseError; - - fn from_str(s: &str) -> Result { - let url 
= Url::parse(s).map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?; - - let address = match url.host() { - Some(Host::Ipv4(ip)) => IpAddr::V4(ip), - Some(Host::Ipv6(ip)) => IpAddr::V6(ip), - Some(Host::Domain(ip)) => IpAddr::V4( - Ipv4Addr::from_str(ip) - .map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?, - ), - _ => return Err(NodeRecordParseError::InvalidUrl(format!("invalid host: {url:?}"))), - }; - let port = url - .port() - .ok_or_else(|| NodeRecordParseError::InvalidUrl("no port specified".to_string()))?; - - let udp_port = if let Some(discovery_port) = url - .query_pairs() - .find_map(|(maybe_disc, port)| (maybe_disc.as_ref() == "discport").then_some(port)) - { - discovery_port.parse::().map_err(NodeRecordParseError::Discport)? - } else { - port - }; - - let id = url - .username() - .parse::() - .map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?; - - Ok(Self { address, id, tcp_port: port, udp_port }) - } -} - -impl TryFrom<&Enr> for NodeRecord { - type Error = NodeRecordParseError; - - fn try_from(enr: &Enr) -> Result { - let Some(address) = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from)) - else { - return Err(NodeRecordParseError::InvalidUrl("ip missing".to_string())) - }; - - let Some(udp_port) = enr.udp4().or_else(|| enr.udp6()) else { - return Err(NodeRecordParseError::InvalidUrl("udp port missing".to_string())) - }; - - let Some(tcp_port) = enr.tcp4().or_else(|| enr.tcp6()) else { - return Err(NodeRecordParseError::InvalidUrl("tcp port missing".to_string())) - }; - - let id = pk_to_id(&enr.public_key()); - - Ok(NodeRecord { address, tcp_port, udp_port, id }.into_ipv4_mapped()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_rlp::Decodable; - use rand::{thread_rng, Rng, RngCore}; - use std::net::Ipv6Addr; - - #[test] - fn test_mapped_ipv6() { - let mut rng = thread_rng(); - - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - let v6 = v4.to_ipv6_mapped(); - - let record = NodeRecord { - 
address: v6.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_mapped_ipv4() { - let mut rng = thread_rng(); - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - - let record = NodeRecord { - address: v4.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(!record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_noderecord_codec_ipv4() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 4]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V4(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_noderecord_codec_ipv6() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 16]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V6(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_url_parse() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(node, NodeRecord { - address: IpAddr::V4([10,3,58,6].into()), - tcp_port: 30303, - udp_port: 30301, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }) - } - - #[test] - fn test_node_display() { - let url = 
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_display_discport() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_serialize() { - let cases = vec![ - // IPv4 - ( - NodeRecord{ - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), - }, - "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"" - ), - // IPv6 - ( - NodeRecord{ - address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), - tcp_port: 52150u16, - udp_port: 52151u16, - id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), - }, - "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", - ) - ]; - - for (node, expected) in cases { - let ser = serde_json::to_string::(&node).expect("couldn't serialize"); - assert_eq!(ser, expected); - } - } - - #[test] - fn test_node_deserialize() { - let cases = vec![ - // IPv4 - ( - 
"\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"", - NodeRecord{ - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), - } - ), - // IPv6 - ( - "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", - NodeRecord{ - address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), - tcp_port: 52150u16, - udp_port: 52151u16, - id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), - } - ), - ]; - - for (url, expected) in cases { - let node: NodeRecord = serde_json::from_str(url).expect("couldn't deserialize"); - assert_eq!(node, expected); - } - } -} diff --git a/crates/rpc/rpc-types/src/peer.rs b/crates/rpc/rpc-types/src/peer.rs index 44dbe5d71..a07e61d00 100644 --- a/crates/rpc/rpc-types/src/peer.rs +++ b/crates/rpc/rpc-types/src/peer.rs @@ -2,8 +2,3 @@ use alloy_primitives::B512; /// Alias for a peer identifier pub type PeerId = B512; - -/// Converts a [`secp256k1::PublicKey`] to a [`PeerId`]. 
-pub fn pk_to_id(pk: &secp256k1::PublicKey) -> PeerId { - PeerId::from_slice(&pk.serialize_uncompressed()[1..]) -} From c70b17a5548c5893042f89614fd5095de8f236aa Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Mon, 6 May 2024 18:50:40 +0200 Subject: [PATCH 210/250] feat: prague engine api types (#8119) --- Cargo.lock | 154 +++++++++--------- Cargo.toml | 40 +++-- crates/engine-primitives/src/lib.rs | 43 ++++- crates/optimism/node/src/engine.rs | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 65 +++++++- .../rpc-types-compat/src/engine/payload.rs | 14 +- crates/rpc/rpc-types/src/beacon/payload.rs | 4 + 7 files changed, 216 insertions(+), 106 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 14a5dd752..36fa163dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -71,7 +71,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.14", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "c-kzg", "serde", ] @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" 
dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -171,17 +171,17 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "arbitrary", "c-kzg", "derive_more", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "serde", "serde_json", ] @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "k256", "serde_json", @@ -300,7 +300,7 @@ dependencies = [ "derive_arbitrary", "derive_more", "ethereum_ssz", - "getrandom 0.2.14", + "getrandom 0.2.15", "hex-literal", "itoa", "k256", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = 
"git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" 
version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -426,24 +426,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "serde", ] [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -456,11 +456,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "serde", "serde_json", ] @@ -468,7 +468,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", "serde", @@ -478,7 +478,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" dependencies = [ "alloy-primitives", "serde", @@ -488,7 +488,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-primitives", "async-trait", @@ -501,9 +501,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -557,7 +557,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] @@ -576,7 +576,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -594,7 +594,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=17c5650#17c5650f91472b3c0e29ea5cede6ce2bc7f87018" +source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -1546,9 +1546,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" dependencies = [ "jobserver", "libc", @@ -2964,7 +2964,7 @@ dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -3318,9 +3318,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = 
"c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -4594,7 +4594,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.14", + "getrandom 0.2.15", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -6132,7 +6132,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", ] [[package]] @@ -6226,7 +6226,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", "libredox", "thiserror", ] @@ -6586,8 +6586,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "arbitrary", "bytes", @@ -6792,9 +6792,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7536,8 +7536,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", - "alloy-genesis 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7804,7 +7804,7 @@ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-rpc-types-anvil", "alloy-rpc-types-engine", "alloy-rpc-types-trace", @@ -7828,7 +7828,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7934,7 +7934,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "reth-primitives", "secp256k1", ] @@ -8068,10 +8068,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=089efac#089efacf72e7583630841b7027c46a3cb2f9c28b" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=7d810bc#7d810bc44c08fe8ec90ebef556883c2531ebf111" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=17c5650)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", @@ -8173,7 +8173,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.14", + "getrandom 0.2.15", "libc", "spin 0.9.8", "untrusted 
0.9.0", @@ -8575,11 +8575,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -8588,9 +8588,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -9598,7 +9598,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] @@ -10060,7 +10060,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.15", ] [[package]] @@ -10463,9 +10463,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index dc693e94f..f8d3dcac2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -278,9 +278,14 @@ reth-node-events = { path = "crates/node/events" } reth-testing-utils = { path = "testing/testing-utils" } # revm -revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } -revm-primitives = { version = "3.1.0", features = ["std"], 
default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "089efac" } +revm = { version = "8.0.0", features = [ + "std", + "secp256k1", +], default-features = false } +revm-primitives = { version = "3.1.0", features = [ + "std", +], default-features = false } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "7d810bc" } # eth alloy-chains = "0.1.15" @@ -289,20 +294,20 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "17c5650" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = 
"17c5650" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "17c5650" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "c3ea7bc" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } # misc auto_impl = "1" @@ -377,7 +382,10 @@ secp256k1 = { version = "0.28", default-features = false, features = [ "recovery", ] } # TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 -enr = { version = "0.12.0", default-features = false, features = ["k256", "rust-secp256k1"] } +enr = { version = "0.12.0", default-features = false, features = [ + "k256", + "rust-secp256k1", +] } # for eip-4844 c-kzg = "1.0.0" diff --git a/crates/engine-primitives/src/lib.rs b/crates/engine-primitives/src/lib.rs index e144d0fcd..99edf521c 100644 --- a/crates/engine-primitives/src/lib.rs +++ b/crates/engine-primitives/src/lib.rs @@ -115,6 +115,29 @@ pub fn validate_payload_timestamp( // the payload does not fall within the time frame of the Cancun fork. return Err(EngineObjectValidationError::UnsupportedFork) } + + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + if version == EngineApiMessageVersion::V4 && !is_prague { + // From the Engine API spec: + // + // + // For `engine_getPayloadV4`: + // + // 1. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of + // the built payload does not fall within the time frame of the Prague fork. + // + // For `engine_forkchoiceUpdatedV4`: + // + // 2. 
Client software **MUST** return `-38005: Unsupported fork` error if the + // `payloadAttributes` is set and the `payloadAttributes.timestamp` does not fall within + // the time frame of the Prague fork. + // + // For `engine_newPayloadV4`: + // + // 2. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of + // the payload does not fall within the time frame of the Prague fork. + return Err(EngineObjectValidationError::UnsupportedFork) + } Ok(()) } @@ -128,7 +151,7 @@ pub fn validate_withdrawals_presence( timestamp: u64, has_withdrawals: bool, ) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); + let is_shanghai_active = chain_spec.is_shanghai_active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { @@ -136,17 +159,17 @@ pub fn validate_withdrawals_presence( return Err(message_validation_kind .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) } - if is_shanghai { + if is_shanghai_active { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 => { - if is_shanghai && !has_withdrawals { + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { + if is_shanghai_active && !has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } - if !is_shanghai && has_withdrawals { + if !is_shanghai_active && has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) } @@ -237,7 +260,7 @@ pub fn validate_parent_beacon_block_root_presence( )) } } - EngineApiMessageVersion::V3 => { + EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { if !has_parent_beacon_block_root { return Err(validation_kind 
.to_error(VersionSpecificValidationError::NoParentBeaconBlockRootPostCancun)) @@ -321,10 +344,14 @@ pub enum EngineApiMessageVersion { V1, /// Version 2 /// - /// Added for shanghai hardfork. + /// Added in the Shanghai hardfork. V2, /// Version 3 /// - /// Added for cancun hardfork. + /// Added in the Cancun hardfork. V3, + /// Version 4 + /// + /// Added in the Prague hardfork. + V4, } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index f5c53d98e..7382d2184 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -74,7 +74,7 @@ pub fn validate_withdrawals_presence( .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 => { + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { if is_shanghai && !has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index eb3b1bfc7..0e4476bb7 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -12,8 +12,8 @@ use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFa use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, - PayloadStatus, TransitionConfiguration, CAPABILITIES, + ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, + PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, }; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, @@ -148,6 +148,30 @@ where 
Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) } + /// See also + pub async fn new_payload_v4( + &self, + payload: ExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> EngineApiResult { + let payload = ExecutionPayload::from(payload); + let payload_or_attrs = + PayloadOrAttributes::<'_, EngineT::PayloadAttributes>::from_execution_payload( + &payload, + Some(parent_beacon_block_root), + ); + EngineT::validate_version_specific_fields( + &self.inner.chain_spec, + EngineApiMessageVersion::V4, + payload_or_attrs, + )?; + + let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; + + Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) + } + /// Sends a message to the beacon consensus engine to update the fork choice _without_ /// withdrawals. /// @@ -280,7 +304,42 @@ where .map_err(|_| EngineApiError::UnknownPayload)? .try_into() .map_err(|_| { - warn!("could not transform built payload into ExecutionPayloadV2"); + warn!("could not transform built payload into ExecutionPayloadV3"); + EngineApiError::UnknownPayload + }) + } + + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. + /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + pub async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> EngineApiResult { + // First we fetch the payload attributes to check the timestamp + let attributes = self.get_payload_attributes(payload_id).await?; + + // validate timestamp according to engine rules + validate_payload_timestamp( + &self.inner.chain_spec, + EngineApiMessageVersion::V4, + attributes.timestamp(), + )?; + + // Now resolve the payload + self.inner + .payload_store + .resolve(payload_id) + .await + .ok_or(EngineApiError::UnknownPayload)? 
+ .map_err(|_| EngineApiError::UnknownPayload)? + .try_into() + .map_err(|_| { + warn!("could not transform built payload into ExecutionPayloadV4"); EngineApiError::UnknownPayload }) } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 3ab9a74b9..f504c169c 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -8,7 +8,8 @@ use reth_primitives::{ }; use reth_rpc_types::engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, + ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, + ExecutionPayloadV4, PayloadError, }; /// Converts [ExecutionPayloadV1] to [Block] @@ -83,8 +84,18 @@ pub fn try_payload_v3_to_block(payload: ExecutionPayloadV3) -> Result Result { + // this performs the same conversion as the underlying V3 payload. 
+ // + // the new request lists (`deposit_requests`, `withdrawal_requests`) are EL -> CL only, so we do + // not do anything special here to handle them + try_payload_v3_to_block(payload.payload_inner) +} + /// Converts [SealedBlock] to [ExecutionPayload] pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { + // todo(onbjerg): check for requests_root here and return payload v4 if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 ExecutionPayload::V3(block_to_payload_v3(value)) @@ -224,6 +235,7 @@ pub fn try_into_block( ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?, ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload)?, ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?, + ExecutionPayload::V4(payload) => try_payload_v4_to_block(payload)?, }; base_payload.header.parent_beacon_block_root = parent_beacon_block_root; diff --git a/crates/rpc/rpc-types/src/beacon/payload.rs b/crates/rpc/rpc-types/src/beacon/payload.rs index a4898b723..2bc4cde78 100644 --- a/crates/rpc/rpc-types/src/beacon/payload.rs +++ b/crates/rpc/rpc-types/src/beacon/payload.rs @@ -498,6 +498,10 @@ impl<'a> From<&'a ExecutionPayload> for BeaconExecutionPayload<'a> { ExecutionPayload::V3(payload) => { BeaconExecutionPayload::V3(BeaconExecutionPayloadV3::from(payload)) } + ExecutionPayload::V4(_payload) => { + // TODO(onbjerg): Implement `ExecutionPayloadV4` support + todo!() + } } } } From 16f85c43397d1ee113bcdeab0e8850dba6aa1ac5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 6 May 2024 19:36:08 +0200 Subject: [PATCH 211/250] fix(discv5): decouple rlpx & discv5 ipmode (#8080) Co-authored-by: Matthias Seitz --- bin/reth/src/commands/p2p/mod.rs | 22 ++- crates/net/discv5/src/config.rs | 206 +++++++++++++++++++++++---- crates/net/discv5/src/error.rs | 3 + crates/net/discv5/src/lib.rs | 59 ++++---- crates/net/network/src/config.rs | 7 +- crates/net/network/src/discovery.rs | 2 +- 
crates/node-core/src/args/network.rs | 41 ++++-- crates/node-core/src/node_config.rs | 25 +++- 8 files changed, 278 insertions(+), 87 deletions(-) diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 18cc6aba8..c3ad0231b 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -18,7 +18,11 @@ use reth_discv4::NatResolver; use reth_interfaces::p2p::bodies::client::BodiesClient; use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord}; use reth_provider::ProviderFactory; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{SocketAddrV4, SocketAddrV6}, + path::PathBuf, + sync::Arc, +}; /// `reth p2p` command #[derive(Debug, Parser)] @@ -143,19 +147,23 @@ impl Command { { network_config = network_config.discovery_v5_with_config_builder(|builder| { let DiscoveryArgs { - discv5_addr, - discv5_port, + discv5_addr: discv5_addr_ipv4, + discv5_addr_ipv6, + discv5_port: discv5_port_ipv4, + discv5_port_ipv6, discv5_lookup_interval, discv5_bootstrap_lookup_interval, discv5_bootstrap_lookup_countdown, .. 
} = self.discovery; + builder .discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( - discv5_addr, - discv5_port, - )))) + discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( + discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), + discv5_addr_ipv6 + .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)), + )) .build(), ) .lookup_interval(discv5_lookup_interval) diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index da7e58cb7..2a246d3d5 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -3,20 +3,26 @@ use std::{ collections::HashSet, fmt::Debug, - net::{IpAddr, Ipv4Addr, SocketAddr}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, }; use derive_more::Display; use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord}; +use tracing::warn; use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; -/// The default address for discv5 via UDP. +/// The default address for discv5 via UDP is IPv4. /// /// Default is 0.0.0.0, all interfaces. See [`discv5::ListenConfig`] default. -pub const DEFAULT_DISCOVERY_V5_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); +pub const DEFAULT_DISCOVERY_V5_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED; + +/// The default IPv6 address for discv5 via UDP. +/// +/// Default is ::, all interfaces. +pub const DEFAULT_DISCOVERY_V5_ADDR_IPV6: Ipv6Addr = Ipv6Addr::UNSPECIFIED; /// The default port for discv5 via UDP. /// @@ -40,7 +46,7 @@ pub const DEFAULT_COUNT_BOOTSTRAP_LOOKUPS: u64 = 100; pub const DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL: u64 = 5; /// Builds a [`Config`]. -#[derive(Debug, Default)] +#[derive(Debug)] pub struct ConfigBuilder { /// Config used by [`discv5::Discv5`]. Contains the discovery listen socket. 
discv5_config: Option, @@ -51,10 +57,11 @@ pub struct ConfigBuilder { /// /// Defaults to L1 mainnet if not set. fork: Option<(&'static [u8], ForkId)>, - /// RLPx TCP port to advertise. Note: so long as `reth_network` handles [`NodeRecord`]s as - /// opposed to [`Enr`](enr::Enr)s, TCP is limited to same IP address as UDP, since - /// [`NodeRecord`] doesn't supply an extra field for and alternative TCP address. - tcp_port: u16, + /// RLPx TCP socket to advertise. + /// + /// NOTE: IP address of RLPx socket overwrites IP address of same IP version in + /// [`discv5::ListenConfig`]. + tcp_socket: SocketAddr, /// List of `(key, rlp-encoded-value)` tuples that should be advertised in local node record /// (in addition to tcp port, udp port and fork). other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, @@ -77,7 +84,7 @@ impl ConfigBuilder { discv5_config, bootstrap_nodes, fork, - tcp_port, + tcp_socket, other_enr_kv_pairs, lookup_interval, bootstrap_lookup_interval, @@ -89,7 +96,7 @@ impl ConfigBuilder { discv5_config: Some(discv5_config), bootstrap_nodes, fork: fork.map(|(key, fork_id)| (key, fork_id.fork_id)), - tcp_port, + tcp_socket, other_enr_kv_pairs, lookup_interval: Some(lookup_interval), bootstrap_lookup_interval: Some(bootstrap_lookup_interval), @@ -152,9 +159,11 @@ impl ConfigBuilder { self } - /// Sets the tcp port to advertise in the local [`Enr`](discv5::enr::Enr). - pub fn tcp_port(mut self, port: u16) -> Self { - self.tcp_port = port; + /// Sets the tcp socket to advertise in the local [`Enr`](discv5::enr::Enr). The IP address of + /// this socket will overwrite the discovery address of the same IP version, if one is + /// configured. 
+ pub fn tcp_socket(mut self, socket: SocketAddr) -> Self { + self.tcp_socket = socket; self } @@ -201,7 +210,7 @@ impl ConfigBuilder { discv5_config, bootstrap_nodes, fork, - tcp_port, + tcp_socket, other_enr_kv_pairs, lookup_interval, bootstrap_lookup_interval, @@ -209,9 +218,12 @@ impl ConfigBuilder { discovered_peer_filter, } = self; - let discv5_config = discv5_config + let mut discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); + discv5_config.listen_config = + amend_listen_config_wrt_rlpx(&discv5_config.listen_config, tcp_socket.ip()); + let fork = fork.map(|(key, fork_id)| (key, fork_id.into())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); @@ -227,7 +239,7 @@ impl ConfigBuilder { discv5_config, bootstrap_nodes, fork, - tcp_port, + tcp_socket, other_enr_kv_pairs, lookup_interval, bootstrap_lookup_interval, @@ -248,8 +260,11 @@ pub struct Config { /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`. pub(super) fork: Option<(&'static [u8], EnrForkIdEntry)>, - /// RLPx TCP port to advertise. - pub(super) tcp_port: u16, + /// RLPx TCP socket to advertise. + /// + /// NOTE: IP address of RLPx socket overwrites IP address of same IP version in + /// [`discv5::ListenConfig`]. + pub(super) tcp_socket: SocketAddr, /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to /// peers by including in local node record. pub(super) other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, @@ -266,9 +281,20 @@ pub struct Config { } impl Config { - /// Returns a new [`ConfigBuilder`], with the RLPx TCP port set to the given port. - pub fn builder(rlpx_tcp_port: u16) -> ConfigBuilder { - ConfigBuilder::default().tcp_port(rlpx_tcp_port) + /// Returns a new [`ConfigBuilder`], with the RLPx TCP port and IP version configured w.r.t. + /// the given socket. 
+ pub fn builder(rlpx_tcp_socket: SocketAddr) -> ConfigBuilder { + ConfigBuilder { + discv5_config: None, + bootstrap_nodes: HashSet::new(), + fork: None, + tcp_socket: rlpx_tcp_socket, + other_enr_kv_pairs: Vec::new(), + lookup_interval: None, + bootstrap_lookup_interval: None, + bootstrap_lookup_countdown: None, + discovered_peer_filter: None, + } } } @@ -286,12 +312,104 @@ impl Config { /// Returns the RLPx (TCP) socket contained in the [`discv5::Config`]. This socket will be /// advertised to peers in the local [`Enr`](discv5::enr::Enr). - pub fn rlpx_socket(&self) -> SocketAddr { - let port = self.tcp_port; - match self.discv5_config.listen_config { - ListenConfig::Ipv4 { ip, .. } => (ip, port).into(), - ListenConfig::Ipv6 { ip, .. } => (ip, port).into(), - ListenConfig::DualStack { ipv4, .. } => (ipv4, port).into(), + pub fn rlpx_socket(&self) -> &SocketAddr { + &self.tcp_socket + } +} + +/// Returns the IPv4 discovery socket if one is configured. +pub fn ipv4(listen_config: &ListenConfig) -> Option { + match listen_config { + ListenConfig::Ipv4 { ip, port } | + ListenConfig::DualStack { ipv4: ip, ipv4_port: port, .. } => { + Some(SocketAddrV4::new(*ip, *port)) + } + ListenConfig::Ipv6 { .. } => None, + } +} + +/// Returns the IPv6 discovery socket if one is configured. +pub fn ipv6(listen_config: &ListenConfig) -> Option { + match listen_config { + ListenConfig::Ipv4 { .. } => None, + ListenConfig::Ipv6 { ip, port } | + ListenConfig::DualStack { ipv6: ip, ipv6_port: port, .. } => { + Some(SocketAddrV6::new(*ip, *port, 0, 0)) + } + } +} + +/// Returns the amended [`discv5::ListenConfig`] based on the RLPx IP address. The ENR is limited +/// to one IP address per IP version (atm, may become spec'd how to advertise different addresses). +/// The RLPx address overwrites the discv5 address w.r.t. IP version. 
+pub fn amend_listen_config_wrt_rlpx( + listen_config: &ListenConfig, + rlpx_addr: IpAddr, +) -> ListenConfig { + let discv5_socket_ipv4 = ipv4(listen_config); + let discv5_socket_ipv6 = ipv6(listen_config); + + let discv5_port_ipv4 = + discv5_socket_ipv4.map(|socket| socket.port()).unwrap_or(DEFAULT_DISCOVERY_V5_PORT); + let discv5_addr_ipv4 = discv5_socket_ipv4.map(|socket| *socket.ip()); + let discv5_port_ipv6 = + discv5_socket_ipv6.map(|socket| socket.port()).unwrap_or(DEFAULT_DISCOVERY_V5_PORT); + let discv5_addr_ipv6 = discv5_socket_ipv6.map(|socket| *socket.ip()); + + let (discv5_socket_ipv4, discv5_socket_ipv6) = discv5_sockets_wrt_rlpx_addr( + rlpx_addr, + discv5_addr_ipv4, + discv5_port_ipv4, + discv5_addr_ipv6, + discv5_port_ipv6, + ); + + ListenConfig::from_two_sockets(discv5_socket_ipv4, discv5_socket_ipv6) +} + +/// Returns the sockets that can be used for discv5 with respect to the RLPx address. ENR specs only +/// acknowledge one address per IP version. +pub fn discv5_sockets_wrt_rlpx_addr( + rlpx_addr: IpAddr, + discv5_addr_ipv4: Option, + discv5_port_ipv4: u16, + discv5_addr_ipv6: Option, + discv5_port_ipv6: u16, +) -> (Option, Option) { + match rlpx_addr { + IpAddr::V4(rlpx_addr) => { + let discv5_socket_ipv6 = + discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); + + if let Some(discv5_addr) = discv5_addr_ipv4 { + warn!(target: "discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" + ); + } + + // overwrite discv5 ipv4 addr with RLPx address. this is since there is no + // spec'd way to advertise a different address for rlpx and discovery in the + // ENR. 
+ (Some(SocketAddrV4::new(rlpx_addr, discv5_port_ipv4)), discv5_socket_ipv6) + } + IpAddr::V6(rlpx_addr) => { + let discv5_socket_ipv4 = + discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); + + if let Some(discv5_addr) = discv5_addr_ipv6 { + warn!(target: "discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" + ); + } + + // overwrite discv5 ipv6 addr with RLPx address. this is since there is no + // spec'd way to advertise a different address for rlpx and discovery in the + // ENR. + (discv5_socket_ipv4, Some(SocketAddrV6::new(rlpx_addr, discv5_port_ipv6, 0, 0))) } } } @@ -351,7 +469,7 @@ mod test { fn parse_boot_nodes() { const OP_SEPOLIA_CL_BOOTNODES: &str ="enr:-J64QBwRIWAco7lv6jImSOjPU_W266lHXzpAS5YOh7WmgTyBZkgLgOwo_mxKJq3wz2XRbsoBItbv1dCyjIoNq67mFguGAYrTxM42gmlkgnY0gmlwhBLSsHKHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDmoWSi8hcsRpQf2eJsNUx-sqv6fH4btmo2HsAzZFAKnKDdGNwgiQGg3VkcIIkBg,enr:-J64QFa3qMsONLGphfjEkeYyF6Jkil_jCuJmm7_a42ckZeUQGLVzrzstZNb1dgBp1GGx9bzImq5VxJLP-BaptZThGiWGAYrTytOvgmlkgnY0gmlwhGsV-zeHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDahfSECTIS_cXyZ8IyNf4leANlZnrsMEWTkEYxf4GMCmDdGNwgiQGg3VkcIIkBg"; - let config = Config::builder(30303) + let config = Config::builder((Ipv4Addr::UNSPECIFIED, 30303).into()) .add_cl_serialized_signed_boot_nodes(OP_SEPOLIA_CL_BOOTNODES) .build(); @@ -371,7 +489,7 @@ mod test { #[test] fn parse_enodes() { - let config = Config::builder(30303) + let config = Config::builder((Ipv4Addr::UNSPECIFIED, 30303).into()) .add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET) .build(); @@ -382,4 +500,34 @@ mod test { assert!(bootstrap_nodes.contains(&node.to_string())); } } + + #[test] + fn overwrite_ipv4_addr() { + let rlpx_addr: Ipv4Addr = "192.168.0.1".parse().unwrap(); + + let listen_config = ListenConfig::default(); + + let amended_config = amend_listen_config_wrt_rlpx(&listen_config, rlpx_addr.into()); + + let 
config_socket_ipv4 = ipv4(&amended_config).unwrap(); + + assert_eq!(*config_socket_ipv4.ip(), rlpx_addr); + assert_eq!(config_socket_ipv4.port(), DEFAULT_DISCOVERY_V5_PORT); + assert_eq!(ipv6(&amended_config), ipv6(&listen_config)); + } + + #[test] + fn overwrite_ipv6_addr() { + let rlpx_addr: Ipv6Addr = "fe80::1".parse().unwrap(); + + let listen_config = ListenConfig::default(); + + let amended_config = amend_listen_config_wrt_rlpx(&listen_config, rlpx_addr.into()); + + let config_socket_ipv6 = ipv6(&amended_config).unwrap(); + + assert_eq!(*config_socket_ipv6.ip(), rlpx_addr); + assert_eq!(config_socket_ipv6.port(), DEFAULT_DISCOVERY_V5_PORT); + assert_eq!(ipv4(&amended_config), ipv4(&listen_config)); + } } diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs index 165620898..277631464 100644 --- a/crates/net/discv5/src/error.rs +++ b/crates/net/discv5/src/error.rs @@ -35,4 +35,7 @@ pub enum Error { /// An error from underlying [`discv5::Discv5`] node. #[error("sigp/discv5 error, {0}")] Discv5Error(discv5::Error), + /// The [`ListenConfig`](discv5::ListenConfig) has been misconfigured. 
+ #[error("misconfigured listen config, RLPx TCP address must also be supported by discv5")] + ListenConfigMisconfigured, } diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 8e156dde1..826556fb0 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -39,8 +39,8 @@ pub use discv5::{self, IpMode}; pub use config::{ BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, - DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, - DEFAULT_SECONDS_LOOKUP_INTERVAL, + DEFAULT_DISCOVERY_V5_ADDR_IPV6, DEFAULT_DISCOVERY_V5_PORT, + DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; pub use enr::enr_to_discv4_id; pub use error::Error; @@ -66,8 +66,8 @@ pub const DEFAULT_MIN_TARGET_KBUCKET_INDEX: usize = 0; pub struct Discv5 { /// sigp/discv5 node. discv5: Arc, - /// [`IpMode`] of the the node. - ip_mode: IpMode, + /// [`IpMode`] of the the RLPx network. + rlpx_ip_mode: IpMode, /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'. fork_key: Option<&'static [u8]>, /// Filter applied to a discovered peers before passing it up to app. @@ -162,7 +162,7 @@ impl Discv5 { // // 1. make local enr from listen config // - let (enr, bc_enr, fork_key, ip_mode) = build_local_enr(sk, &discv5_config); + let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config); trace!(target: "net::discv5", ?enr, @@ -214,7 +214,7 @@ impl Discv5 { ); Ok(( - Self { discv5, ip_mode, fork_key, discovered_peer_filter, metrics }, + Self { discv5, rlpx_ip_mode, fork_key, discovered_peer_filter, metrics }, discv5_updates, bc_enr, )) @@ -328,7 +328,7 @@ impl Discv5 { } /// Tries to convert an [`Enr`](discv5::Enr) into the backwards compatible type [`NodeRecord`], - /// w.r.t. local [`IpMode`]. Uses source socket as udp socket. + /// w.r.t. local RLPx [`IpMode`]. Uses source socket as udp socket. 
pub fn try_into_reachable( &self, enr: &discv5::Enr, @@ -336,13 +336,15 @@ impl Discv5 { ) -> Result { let id = enr_to_discv4_id(enr).ok_or(Error::IncompatibleKeyType)?; - // since we, on bootstrap, set tcp4 in local ENR for `IpMode::Dual`, we prefer tcp4 here - // too - let Some(tcp_port) = (match self.ip_mode() { - IpMode::Ip4 | IpMode::DualStack => enr.tcp4(), + if enr.tcp4().is_none() && enr.tcp6().is_none() { + return Err(Error::UnreachableRlpx) + } + let Some(tcp_port) = (match self.rlpx_ip_mode { + IpMode::Ip4 => enr.tcp4(), IpMode::Ip6 => enr.tcp6(), + _ => unimplemented!("dual-stack support not implemented for rlpx"), }) else { - return Err(Error::IpVersionMismatchRlpx(self.ip_mode())) + return Err(Error::IpVersionMismatchRlpx(self.rlpx_ip_mode)) }; Ok(NodeRecord { address: socket.ip(), tcp_port, udp_port: socket.port(), id }) @@ -385,9 +387,9 @@ impl Discv5 { // Complementary //////////////////////////////////////////////////////////////////////////////////////////////// - /// Returns the [`IpMode`] of the local node. + /// Returns the RLPx [`IpMode`] of the local node. pub fn ip_mode(&self) -> IpMode { - self.ip_mode + self.rlpx_ip_mode } /// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr). @@ -418,43 +420,45 @@ pub fn build_local_enr( ) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { let mut builder = discv5::enr::Enr::builder(); - let Config { discv5_config, fork, tcp_port, other_enr_kv_pairs, .. } = config; + let Config { discv5_config, fork, tcp_socket, other_enr_kv_pairs, .. 
} = config; - let (ip_mode, socket) = match discv5_config.listen_config { + let socket = match discv5_config.listen_config { ListenConfig::Ipv4 { ip, port } => { if ip != Ipv4Addr::UNSPECIFIED { builder.ip4(ip); } builder.udp4(port); - builder.tcp4(*tcp_port); + builder.tcp4(tcp_socket.port()); - (IpMode::Ip4, (ip, port).into()) + (ip, port).into() } ListenConfig::Ipv6 { ip, port } => { if ip != Ipv6Addr::UNSPECIFIED { builder.ip6(ip); } builder.udp6(port); - builder.tcp6(*tcp_port); + builder.tcp6(tcp_socket.port()); - (IpMode::Ip6, (ip, port).into()) + (ip, port).into() } ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { if ipv4 != Ipv4Addr::UNSPECIFIED { builder.ip4(ipv4); } builder.udp4(ipv4_port); - builder.tcp4(*tcp_port); + builder.tcp4(tcp_socket.port()); if ipv6 != Ipv6Addr::UNSPECIFIED { builder.ip6(ipv6); } builder.udp6(ipv6_port); - (IpMode::DualStack, (ipv6, ipv6_port).into()) + (ipv6, ipv6_port).into() } }; + let rlpx_ip_mode = if tcp_socket.is_ipv4() { IpMode::Ip4 } else { IpMode::Ip6 }; + // identifies which network node is on let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); @@ -473,7 +477,7 @@ pub fn build_local_enr( // backwards compatible enr let bc_enr = NodeRecord::from_secret_key(socket, sk); - (enr, bc_enr, network_stack_id, ip_mode) + (enr, bc_enr, network_stack_id, rlpx_ip_mode) } /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. 
@@ -660,7 +664,7 @@ mod test { ) .unwrap(), ), - ip_mode: IpMode::Ip4, + rlpx_ip_mode: IpMode::Ip4, fork_key: None, discovered_peer_filter: MustNotIncludeKeys::default(), metrics: Discv5Metrics::default(), @@ -673,9 +677,10 @@ mod test { let secret_key = SecretKey::new(&mut thread_rng()); let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port_discv5}").parse().unwrap(); + let rlpx_addr: SocketAddr = "127.0.0.1:30303".parse().unwrap(); let discv5_listen_config = ListenConfig::from(discv5_addr); - let discv5_config = Config::builder(30303) + let discv5_config = Config::builder(rlpx_addr) .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) .build(); @@ -867,7 +872,9 @@ mod test { const TCP_PORT: u16 = 30303; let fork_id = MAINNET.latest_fork_id(); - let config = Config::builder(TCP_PORT).fork(NetworkStackId::ETH, fork_id).build(); + let config = Config::builder((Ipv4Addr::UNSPECIFIED, TCP_PORT).into()) + .fork(NetworkStackId::ETH, fork_id) + .build(); let sk = SecretKey::new(&mut thread_rng()); let (enr, _, _, _) = build_local_enr(&sk, &config); diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 40d88f991..9e0f055ea 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -121,16 +121,15 @@ impl NetworkConfig { self, f: impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::Config, ) -> Self { - let rlpx_port = self.listener_addr.port(); let network_stack_id = NetworkStackId::id(&self.chain_spec); let fork_id = self.chain_spec.latest_fork_id(); let boot_nodes = self.boot_nodes.clone(); - let mut builder = - reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); + let mut builder = reth_discv5::Config::builder(self.listener_addr) + .add_unsigned_boot_nodes(boot_nodes.into_iter()); if let Some(id) = network_stack_id { - builder = builder.fork(id, fork_id); + builder = builder.fork(id, fork_id) } self.set_discovery_v5(f(builder)) diff --git 
a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 67d659454..bb456d4ea 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -369,7 +369,7 @@ mod tests { let discv4_config = Discv4ConfigBuilder::default().external_ip_resolver(None).build(); let discv5_listen_config = discv5::ListenConfig::from(discv5_addr); - let discv5_config = reth_discv5::Config::builder(0) + let discv5_config = reth_discv5::Config::builder(discv5_addr) .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) .build(); diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 0d5206e7f..7b1b9d0d5 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -5,7 +5,7 @@ use clap::Args; use reth_config::Config; use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; use reth_discv5::{ - DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, DEFAULT_DISCOVERY_V5_PORT, + DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; use reth_net_nat::NatResolver; @@ -19,7 +19,11 @@ use reth_network::{ }; use reth_primitives::{mainnet_nodes, ChainSpec, NodeRecord}; use secp256k1::SecretKey; -use std::{net::IpAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + path::PathBuf, + sync::Arc, +}; /// Parameters for configuring the network more granularity via CLI #[derive(Debug, Clone, Args, PartialEq, Eq)] @@ -227,31 +231,40 @@ pub struct DiscoveryArgs { #[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, - /// The UDP address to use for devp2p peer discovery version 5. 
- #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", - default_value_t = DEFAULT_DISCOVERY_V5_ADDR)] - pub discv5_addr: IpAddr, + /// The UDP IPv4 address to use for devp2p peer discovery version 5. + #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", default_value = None)] + pub discv5_addr: Option, + + /// The UDP IPv6 address to use for devp2p peer discovery version 5. + #[arg(id = "discovery.v5.addr.ipv6", long = "discovery.v5.addr.ipv6", value_name = "DISCOVERY_V5_ADDR_IPV6", default_value = None)] + pub discv5_addr_ipv6: Option, - /// The UDP port to use for devp2p peer discovery version 5. + /// The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is + /// IPv4, or `--discv5.addr` is set. #[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT", default_value_t = DEFAULT_DISCOVERY_V5_PORT)] pub discv5_port: u16, + /// The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is + /// IPv6, or `--discv5.addr.ipv6` is set. + #[arg(id = "discovery.v5.port.ipv6", long = "discovery.v5.port.ipv6", value_name = "DISCOVERY_V5_PORT_IPV6", + default_value = None, default_value_t = DEFAULT_DISCOVERY_V5_PORT)] + pub discv5_port_ipv6: u16, + /// The interval in seconds at which to carry out periodic lookup queries, for the whole /// run of the program. - #[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", - default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)] + #[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)] pub discv5_lookup_interval: u64, /// The interval in seconds at which to carry out boost lookup queries, for a fixed number of /// times, at bootstrap. 
- #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval", + #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval", default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)] pub discv5_bootstrap_lookup_interval: u64, /// The number of times to carry out boost lookup queries at bootstrap. - #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown", - default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)] + #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown", + default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)] pub discv5_bootstrap_lookup_countdown: u64, } @@ -289,8 +302,10 @@ impl Default for DiscoveryArgs { enable_discv5_discovery: cfg!(feature = "optimism"), addr: DEFAULT_DISCOVERY_ADDR, port: DEFAULT_DISCOVERY_PORT, - discv5_addr: DEFAULT_DISCOVERY_V5_ADDR, + discv5_addr: None, + discv5_addr_ipv6: None, discv5_port: DEFAULT_DISCOVERY_V5_PORT, + discv5_port_ipv6: DEFAULT_DISCOVERY_V5_PORT, discv5_lookup_interval: DEFAULT_SECONDS_LOOKUP_INTERVAL, discv5_bootstrap_lookup_interval: DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, discv5_bootstrap_lookup_countdown: DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index a4301b804..dd7bd1ccb 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -26,7 +26,11 @@ use reth_provider::{ }; use reth_tasks::TaskExecutor; use secp256k1::SecretKey; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{SocketAddr, SocketAddrV4, SocketAddrV6}, + path::PathBuf, + sync::Arc, +}; use tracing::*; /// The 
default prometheus recorder handle. We use a global static to ensure that it is only @@ -482,19 +486,26 @@ impl NodeConfig { // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { let DiscoveryArgs { - discv5_addr, - discv5_port, + discv5_addr: discv5_addr_ipv4, + discv5_addr_ipv6, + discv5_port: discv5_port_ipv4, + discv5_port_ipv6, discv5_lookup_interval, discv5_bootstrap_lookup_interval, discv5_bootstrap_lookup_countdown, .. } = self.network.discovery; + + let discv5_port_ipv4 = discv5_port_ipv4 + self.instance - 1; + let discv5_port_ipv6 = discv5_port_ipv6 + self.instance - 1; + builder .discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( - discv5_addr, - discv5_port + self.instance - 1, - )))) + discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( + discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), + discv5_addr_ipv6 + .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)), + )) .build(), ) .lookup_interval(discv5_lookup_interval) From 47dade7c12cc73dc839461be4b134329ea769404 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 6 May 2024 14:14:49 -0400 Subject: [PATCH 212/250] fix: set mainnet timestamps in op chainspecs for consensus checks (#8129) --- crates/primitives/src/chain/spec.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 823548d27..de56ff1fe 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -286,7 +286,9 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { ), (Hardfork::Bedrock, ForkCondition::Block(105235063)), (Hardfork::Regolith, ForkCondition::Timestamp(0)), + (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)), (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), + (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)), (Hardfork::Ecotone, 
ForkCondition::Timestamp(1710374401)), ]), base_fee_params: BaseFeeParamsKind::Variable( From 077f7310c707b07d52086a7887f3e92438106e42 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 6 May 2024 21:29:00 +0200 Subject: [PATCH 213/250] fix: ensure valid parent hash in prepare_invalid_response (#8123) Co-authored-by: Roman Krasiuk --- crates/consensus/beacon/src/engine/mod.rs | 31 +++++++++++++++++++---- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index d3c5bfe09..4e3550cd3 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -702,7 +702,7 @@ where /// - null if client software cannot determine the ancestor of the invalid payload satisfying /// the above conditions. fn latest_valid_hash_for_invalid_payload( - &self, + &mut self, parent_hash: B256, insert_err: Option<&InsertBlockErrorKind>, ) -> Option { @@ -712,12 +712,31 @@ where } // Check if parent exists in side chain or in canonical chain. + // TODO: handle find_block_by_hash errors. if matches!(self.blockchain.find_block_by_hash(parent_hash, BlockSource::Any), Ok(Some(_))) { Some(parent_hash) } else { - // TODO: attempt to iterate over ancestors in the invalid cache + // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor + let mut current_hash = parent_hash; + let mut current_header = self.invalid_headers.get(¤t_hash); + while let Some(header) = current_header { + current_hash = header.parent_hash; + current_header = self.invalid_headers.get(¤t_hash); + + // If current_header is None, then the current_hash does not have an invalid + // ancestor in the cache, check its presence in blockchain tree + if current_header.is_none() && + matches!( + // TODO: handle find_block_by_hash errors. 
+ self.blockchain.find_block_by_hash(current_hash, BlockSource::Any), + Ok(Some(_)) + ) + { + return Some(current_hash) + } + } None } } @@ -725,7 +744,7 @@ where /// Prepares the invalid payload response for the given hash, checking the /// database for the parent hash and populating the payload status with the latest valid hash /// according to the engine api spec. - fn prepare_invalid_response(&self, mut parent_hash: B256) -> PayloadStatus { + fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> PayloadStatus { // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal // PoW block, which we need to identify by looking at the parent's block difficulty if let Ok(Some(parent)) = self.blockchain.header_by_hash_or_number(parent_hash.into()) { @@ -734,10 +753,12 @@ where } } + let valid_parent_hash = + self.latest_valid_hash_for_invalid_payload(parent_hash, None).unwrap_or_default(); PayloadStatus::from_status(PayloadStatusEnum::Invalid { validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), }) - .with_latest_valid_hash(parent_hash) + .with_latest_valid_hash(valid_parent_hash) } /// Checks if the given `check` hash points to an invalid header, inserting the given `head` @@ -1089,7 +1110,7 @@ where /// /// This validation **MUST** be instantly run in all cases even during active sync process. 
fn ensure_well_formed_payload( - &self, + &mut self, payload: ExecutionPayload, cancun_fields: Option, ) -> Result { From c79c18874593f2554a09be82edac72a0c4230b19 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 6 May 2024 21:51:02 +0200 Subject: [PATCH 214/250] fix(discv5): no address cli arg (#8130) --- crates/net/network/src/config.rs | 7 ++++++- crates/node-core/src/node_config.rs | 27 +++++++++++++++++++++++---- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 9e0f055ea..c2a7b3238 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -141,11 +141,16 @@ impl NetworkConfig { self } - /// Sets the address for the incoming connection listener. + /// Sets the address for the incoming RLPx connection listener. pub fn set_listener_addr(mut self, listener_addr: SocketAddr) -> Self { self.listener_addr = listener_addr; self } + + /// Returns the address for the incoming RLPx connection listener. 
+ pub fn listener_addr(&self) -> &SocketAddr { + &self.listener_addr + } } impl NetworkConfig diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index dd7bd1ccb..5cb28c873 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -27,7 +27,7 @@ use reth_provider::{ use reth_tasks::TaskExecutor; use secp256k1::SecretKey; use std::{ - net::{SocketAddr, SocketAddrV4, SocketAddrV6}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, path::PathBuf, sync::Arc, }; @@ -482,13 +482,14 @@ impl NodeConfig { return config } + let rlpx_addr = config.listener_addr().ip(); // work around since discv5 config builder can't be integrated into network config builder // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { let DiscoveryArgs { - discv5_addr: discv5_addr_ipv4, + discv5_addr, discv5_addr_ipv6, - discv5_port: discv5_port_ipv4, + discv5_port, discv5_port_ipv6, discv5_lookup_interval, discv5_bootstrap_lookup_interval, @@ -496,7 +497,9 @@ impl NodeConfig { .. } = self.network.discovery; - let discv5_port_ipv4 = discv5_port_ipv4 + self.instance - 1; + let discv5_addr_ipv4 = discv5_addr.or_else(|| ipv4(rlpx_addr)); + let discv5_addr_ipv6 = discv5_addr_ipv6.or_else(|| ipv6(rlpx_addr)); + let discv5_port_ipv4 = discv5_port + self.instance - 1; let discv5_port_ipv6 = discv5_port_ipv6 + self.instance - 1; builder @@ -548,3 +551,19 @@ impl Default for NodeConfig { } } } + +/// Returns the address if this is an [`Ipv4Addr`]. +pub fn ipv4(ip: IpAddr) -> Option { + match ip { + IpAddr::V4(ip) => Some(ip), + IpAddr::V6(_) => None, + } +} + +/// Returns the address if this is an [`Ipv6Addr`]. 
+pub fn ipv6(ip: IpAddr) -> Option { + match ip { + IpAddr::V4(_) => None, + IpAddr::V6(ip) => Some(ip), + } +} From 6f72c0ab5a28b18355089764d672ea08688e7e11 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 6 May 2024 21:51:11 +0200 Subject: [PATCH 215/250] cli(op): simplify init-state cmd (#8126) --- bin/reth/src/commands/init_state.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs index e0558be32..ef640e01c 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/bin/reth/src/commands/init_state.rs @@ -10,7 +10,7 @@ use crate::{ use clap::Parser; use reth_config::config::EtlConfig; use reth_db::{database::Database, init_db}; -use reth_node_core::init::{init_from_state_dump, init_genesis}; +use reth_node_core::init::init_from_state_dump; use reth_primitives::{ChainSpec, B256}; use reth_provider::ProviderFactory; @@ -44,7 +44,7 @@ pub struct InitStateCommand { /// JSONL file with state dump. /// - /// Must contain accounts in following format, additional account fields are ignored. Can + /// Must contain accounts in following format, additional account fields are ignored. Must /// also contain { "root": \ } as first line. /// { /// "balance": "\", @@ -59,8 +59,8 @@ pub struct InitStateCommand { /// /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until /// and including the non-genesis block to init chain at. See 'import' command. 
- #[arg(long, value_name = "STATE_DUMP_FILE", verbatim_doc_comment, default_value = None)] - state: Option, + #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] + state: PathBuf, #[command(flatten)] db: DatabaseArgs, @@ -69,7 +69,7 @@ pub struct InitStateCommand { impl InitStateCommand { /// Execute the `init` command pub async fn execute(self) -> eyre::Result<()> { - info!(target: "reth::cli", "reth init starting"); + info!(target: "reth::cli", "Reth init-state starting"); // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); @@ -84,12 +84,9 @@ impl InitStateCommand { EtlConfig::default_file_size(), ); - info!(target: "reth::cli", "Writing genesis block"); + info!(target: "reth::cli", "Initiating state dump"); - let hash = match self.state { - Some(path) => init_at_state(path, provider_factory, etl_config)?, - None => init_genesis(provider_factory)?, - }; + let hash = init_at_state(self.state, provider_factory, etl_config)?; info!(target: "reth::cli", hash = ?hash, "Genesis block written"); Ok(()) From 5f82993c23164ce8ccdc7bf3ae5085205383a5c8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 May 2024 21:52:16 +0200 Subject: [PATCH 216/250] chore: replace beacon types (#8125) --- Cargo.lock | 168 +++--- Cargo.toml | 41 +- crates/rpc/rpc-types/Cargo.toml | 1 + crates/rpc/rpc-types/src/beacon/constants.rs | 17 - .../src/beacon/events/attestation.rs | 30 - .../beacon/events/light_client_finality.rs | 54 -- .../beacon/events/light_client_optimistic.rs | 24 - crates/rpc/rpc-types/src/beacon/events/mod.rs | 403 ------------- crates/rpc/rpc-types/src/beacon/header.rs | 125 ---- crates/rpc/rpc-types/src/beacon/mod.rs | 19 - crates/rpc/rpc-types/src/beacon/payload.rs | 569 ------------------ .../rpc/rpc-types/src/beacon/withdrawals.rs | 70 --- crates/rpc/rpc-types/src/lib.rs | 1 - crates/rpc/rpc-types/src/relay/mod.rs | 16 +- examples/beacon-api-sse/Cargo.toml | 1 + examples/beacon-api-sse/src/main.rs | 3 
+- 16 files changed, 119 insertions(+), 1423 deletions(-) delete mode 100644 crates/rpc/rpc-types/src/beacon/constants.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/events/attestation.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/events/mod.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/header.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/mod.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/payload.rs delete mode 100644 crates/rpc/rpc-types/src/beacon/withdrawals.rs diff --git a/Cargo.lock b/Cargo.lock index 36fa163dd..866a38167 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -71,7 +71,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.14", "once_cell", "version_check", "zerocopy", @@ -133,12 +133,12 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "c-kzg", "serde", ] @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" 
dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -171,17 +171,17 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.8", + "winnow 0.6.7", ] [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "arbitrary", "c-kzg", "derive_more", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -211,10 +211,10 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "serde", "serde_json", ] @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -245,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "serde", @@ -257,13 +257,13 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-signer", "alloy-sol-types", "async-trait", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "k256", "serde_json", @@ -300,7 +300,7 @@ dependencies = [ "derive_arbitrary", "derive_more", "ethereum_ssz", - "getrandom 0.2.15", + "getrandom 0.2.14", "hex-literal", "itoa", "k256", @@ -316,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = 
"git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -366,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -386,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" 
version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -426,24 +426,36 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "serde", ] +[[package]] +name = "alloy-rpc-types-beacon" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-primitives", + "alloy-rpc-types-engine", + "serde", + "serde_with", +] + [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", @@ -456,11 +468,11 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "serde", "serde_json", ] @@ -468,7 +480,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "serde", @@ -478,7 +490,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#05af0de129dc0fa081b19b2f0a69b26941f8fec7" +source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" dependencies = [ "alloy-primitives", "serde", @@ -488,7 +500,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "async-trait", @@ -501,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -557,7 +569,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ - "winnow 0.6.8", + "winnow 0.6.7", ] [[package]] @@ -576,7 +588,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -594,7 +606,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=c3ea7bc#c3ea7bce27113086030c94919954d42be5cad78a" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -1087,6 +1099,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" name = "beacon-api-sse" version = "0.0.0" dependencies = [ + "alloy-rpc-types-beacon", "clap", "futures-util", "mev-share-sse", @@ -1546,9 +1559,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.97" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" +checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" dependencies = [ "jobserver", "libc", @@ -2964,7 +2977,7 @@ 
dependencies = [ name = "exex-rollup" version = "0.0.0" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rlp", "alloy-sol-types", "eyre", @@ -3318,9 +3331,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "js-sys", @@ -4594,7 +4607,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.15", + "getrandom 0.2.14", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -6132,7 +6145,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.14", ] [[package]] @@ -6226,7 +6239,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.14", "libredox", "thiserror", ] @@ -6586,8 +6599,8 @@ dependencies = [ name = "reth-codecs" version = "0.2.0-beta.6" dependencies = [ - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "arbitrary", "bytes", @@ -6792,9 +6805,9 @@ dependencies = [ name = "reth-e2e-test-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + 
"alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-network", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -7536,8 +7549,8 @@ name = "reth-primitives" version = "0.2.0-beta.6" dependencies = [ "alloy-chains", - "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", "alloy-trie", @@ -7804,8 +7817,9 @@ name = "reth-rpc-types" version = "0.2.0-beta.6" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-anvil", + "alloy-rpc-types-beacon", "alloy-rpc-types-engine", "alloy-rpc-types-trace", "arbitrary", @@ -7828,7 +7842,7 @@ name = "reth-rpc-types-compat" version = "0.2.0-beta.6" dependencies = [ "alloy-rlp", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7934,7 +7948,7 @@ dependencies = [ name = "reth-testing-utils" version = "0.2.0-beta.6" dependencies = [ - "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "reth-primitives", "secp256k1", ] @@ -8068,10 +8082,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=7d810bc#7d810bc44c08fe8ec90ebef556883c2531ebf111" +source = 
"git+https://github.com/paradigmxyz/evm-inspectors?rev=7168ac5#7168ac55682fb420da7a82ed94bfb0c30a034113" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=c3ea7bc)", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", @@ -8173,7 +8187,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.14", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -8575,11 +8589,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ - "bitflags 2.5.0", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -8588,9 +8602,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -9598,7 +9612,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.8", + "winnow 0.6.7", ] [[package]] @@ -10060,7 +10074,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.14", ] [[package]] @@ -10463,9 +10477,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" +checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index f8d3dcac2..e6edbe5b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -278,14 +278,9 @@ reth-node-events = { path = "crates/node/events" } reth-testing-utils = { path = "testing/testing-utils" } # revm -revm = { version = "8.0.0", features = [ - "std", - "secp256k1", -], default-features = false } -revm-primitives = { version = "3.1.0", features = [ - "std", -], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "7d810bc" } +revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } +revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "7168ac5" } # eth alloy-chains = "0.1.15" @@ -294,20 +289,21 @@ alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" 
} +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "c3ea7bc" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "c3ea7bc" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "77c1240" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } # misc auto_impl = "1" @@ -382,10 +378,7 @@ secp256k1 = { version = "0.28", default-features = false, features = [ "recovery", ] } # TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 -enr = { version = "0.12.0", default-features = false, features = [ - "k256", - "rust-secp256k1", -] } +enr = { version = "0.12.0", default-features = false, features = ["k256", "rust-secp256k1"] } # for eip-4844 c-kzg = "1.0.0" diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 1426b50f8..83ad91f5c 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -18,6 +18,7 @@ alloy-primitives = { 
workspace = true, features = ["rand", "rlp", "serde"] } alloy-rpc-types = { workspace = true, features = ["jsonrpsee-types"] } alloy-rpc-types-anvil.workspace = true alloy-rpc-types-trace.workspace = true +alloy-rpc-types-beacon.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } ethereum_ssz_derive = { version = "0.5", optional = true } ethereum_ssz = { version = "0.5", optional = true } diff --git a/crates/rpc/rpc-types/src/beacon/constants.rs b/crates/rpc/rpc-types/src/beacon/constants.rs deleted file mode 100644 index 945a4ba20..000000000 --- a/crates/rpc/rpc-types/src/beacon/constants.rs +++ /dev/null @@ -1,17 +0,0 @@ -/// The Domain Separation Tag for hash_to_point in Ethereum beacon chain BLS12-381 signatures. -/// -/// This is also the name of the ciphersuite that defines beacon chain BLS signatures. -/// -/// See: -/// -/// -pub const BLS_DST_SIG: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; - -/// The number of bytes in a BLS12-381 public key. -pub const BLS_PUBLIC_KEY_BYTES_LEN: usize = 48; - -/// The number of bytes in a BLS12-381 secret key. -pub const BLS_SECRET_KEY_BYTES_LEN: usize = 32; - -/// The number of bytes in a BLS12-381 signature. 
-pub const BLS_SIGNATURE_BYTES_LEN: usize = 96; diff --git a/crates/rpc/rpc-types/src/beacon/events/attestation.rs b/crates/rpc/rpc-types/src/beacon/events/attestation.rs deleted file mode 100644 index c789a4671..000000000 --- a/crates/rpc/rpc-types/src/beacon/events/attestation.rs +++ /dev/null @@ -1,30 +0,0 @@ -use alloy_primitives::B256; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestationData { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub index: u64, - pub beacon_block_root: B256, - pub source: Source, - pub target: Target, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Source { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub root: B256, -} -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Target { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub root: B256, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs b/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs deleted file mode 100644 index 10928c7a7..000000000 --- a/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs +++ /dev/null @@ -1,54 +0,0 @@ -use alloy_primitives::{Bytes, B256}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientFinalityData { - pub attested_header: AttestedHeader, - pub finalized_header: FinalizedHeader, - pub finality_branch: Vec, - pub sync_aggregate: SyncAggregate, - #[serde_as(as = "DisplayFromStr")] - pub signature_slot: u64, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestedHeader { - pub beacon: Beacon, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, 
Serialize, Deserialize)] -pub struct Beacon { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - pub parent_root: B256, - pub state_root: B256, - pub body_root: B256, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct FinalizedHeader { - pub beacon: Beacon2, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Beacon2 { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - pub parent_root: B256, - pub state_root: B256, - pub body_root: B256, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SyncAggregate { - pub sync_committee_bits: Bytes, - pub sync_committee_signature: Bytes, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs b/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs deleted file mode 100644 index af310f8cc..000000000 --- a/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::beacon::header::BeaconBlockHeader; -use alloy_primitives::Bytes; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientOptimisticData { - pub attested_header: AttestedHeader, - pub sync_aggregate: SyncAggregate, - #[serde_as(as = "DisplayFromStr")] - pub signature_slot: u64, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestedHeader { - pub beacon: BeaconBlockHeader, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SyncAggregate { - pub sync_committee_bits: Bytes, - pub sync_committee_signature: Bytes, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/mod.rs b/crates/rpc/rpc-types/src/beacon/events/mod.rs deleted file mode 100644 index 
501494a91..000000000 --- a/crates/rpc/rpc-types/src/beacon/events/mod.rs +++ /dev/null @@ -1,403 +0,0 @@ -//! Support for the Beacon API events -//! -//! See also [ethereum-beacon-API eventstream](https://ethereum.github.io/beacon-APIs/#/Events/eventstream) - -use crate::engine::PayloadAttributes; -use alloy_primitives::{Address, Bytes, B256}; -use attestation::AttestationData; -use light_client_finality::LightClientFinalityData; -use light_client_optimistic::LightClientOptimisticData; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -pub mod attestation; -pub mod light_client_finality; -pub mod light_client_optimistic; - -/// Topic variant for the eventstream API -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum BeaconNodeEventTopic { - PayloadAttributes, - Head, - Block, - Attestation, - VoluntaryExit, - BlsToExecutionChange, - FinalizedCheckpoint, - ChainReorg, - ContributionAndProof, - LightClientFinalityUpdate, - LightClientOptimisticUpdate, - BlobSidecar, -} - -impl BeaconNodeEventTopic { - /// Returns the identifier value for the eventstream query - pub fn query_value(&self) -> &'static str { - match self { - BeaconNodeEventTopic::PayloadAttributes => "payload_attributes", - BeaconNodeEventTopic::Head => "head", - BeaconNodeEventTopic::Block => "block", - BeaconNodeEventTopic::Attestation => "attestation", - BeaconNodeEventTopic::VoluntaryExit => "voluntary_exit", - BeaconNodeEventTopic::BlsToExecutionChange => "bls_to_execution_change", - BeaconNodeEventTopic::FinalizedCheckpoint => "finalized_checkpoint", - BeaconNodeEventTopic::ChainReorg => "chain_reorg", - BeaconNodeEventTopic::ContributionAndProof => "contribution_and_proof", - BeaconNodeEventTopic::LightClientFinalityUpdate => "light_client_finality_update", - BeaconNodeEventTopic::LightClientOptimisticUpdate => "light_client_optimistic_update", - BeaconNodeEventTopic::BlobSidecar => "blob_sidecar", - } - } -} - -/// Event for the `payload_attributes` topic of 
the beacon API node event stream. -/// -/// This event gives block builders and relays sufficient information to construct or verify a block -/// at `proposal_slot`. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct PayloadAttributesEvent { - /// the identifier of the beacon hard fork at `proposal_slot`, e.g `"bellatrix"`, `"capella"`. - pub version: String, - /// Wrapped data of the event. - pub data: PayloadAttributesData, -} - -/// Event for the `Head` topic of the beacon API node event stream. -/// -/// The node has finished processing, resulting in a new head. previous_duty_dependent_root is -/// \`get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch - 1) - 1)\` and -/// current_duty_dependent_root is \`get_block_root_at_slot(state, -/// compute_start_slot_at_epoch(epoch) -/// - 1)\`. Both dependent roots use the genesis block root in the case of underflow. -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeadEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub block: B256, - pub state: B256, - pub epoch_transition: bool, - pub previous_duty_dependent_root: B256, - pub current_duty_dependent_root: B256, - pub execution_optimistic: bool, -} - -/// Event for the `Block` topic of the beacon API node event stream. -/// -/// The node has received a valid block (from P2P or API) -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlockEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub block: B256, - pub execution_optimistic: bool, -} - -/// Event for the `Attestation` topic of the beacon API node event stream. 
-/// -/// The node has received a valid attestation (from P2P or API) -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestationEvent { - pub aggregation_bits: Bytes, - pub signature: Bytes, - pub data: AttestationData, -} - -/// Event for the `VoluntaryExit` topic of the beacon API node event stream. -/// -/// The node has received a valid voluntary exit (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct VoluntaryExitEvent { - pub message: VoluntaryExitMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct VoluntaryExitMessage { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - #[serde_as(as = "DisplayFromStr")] - pub validator_index: u64, -} - -/// Event for the `BlsToExecutionChange` topic of the beacon API node event stream. -/// -/// The node has received a BLS to execution change (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlsToExecutionChangeEvent { - pub message: BlsToExecutionChangeMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlsToExecutionChangeMessage { - #[serde_as(as = "DisplayFromStr")] - pub validator_index: u64, - pub from_bls_pubkey: String, - pub to_execution_address: Address, -} - -/// Event for the `Deposit` topic of the beacon API node event stream. -/// -/// Finalized checkpoint has been updated -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct FinalizedCheckpointEvent { - pub block: B256, - pub state: B256, - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub execution_optimistic: bool, -} - -/// Event for the `ChainReorg` topic of the beacon API node event stream. 
-/// -/// The node has reorganized its chain -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ChainReorgEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub depth: u64, - pub old_head_block: B256, - pub new_head_block: B256, - pub old_head_state: B256, - pub new_head_state: B256, - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub execution_optimistic: bool, -} - -/// Event for the `ContributionAndProof` topic of the beacon API node event stream. -/// -/// The node has received a valid sync committee SignedContributionAndProof (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ContributionAndProofEvent { - pub message: ContributionAndProofMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ContributionAndProofMessage { - #[serde_as(as = "DisplayFromStr")] - pub aggregator_index: u64, - pub contribution: Contribution, - pub selection_proof: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Contribution { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub beacon_block_root: B256, - #[serde_as(as = "DisplayFromStr")] - pub subcommittee_index: u64, - pub aggregation_bits: Bytes, - pub signature: Bytes, -} - -/// Event for the `LightClientFinalityUpdate` topic of the beacon API node event stream. -/// -/// The node's latest known `LightClientFinalityUpdate` has been updated -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientFinalityUpdateEvent { - pub version: String, - pub data: LightClientFinalityData, -} - -/// Event for the `LightClientOptimisticUpdate` topic of the beacon API node event stream. 
-/// -/// The node's latest known `LightClientOptimisticUpdate` has been updated -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientOptimisticUpdateEvent { - pub version: String, - pub data: LightClientOptimisticData, -} - -/// Event for the `BlobSidecar` topic of the beacon API node event stream. -/// -/// The node has received a BlobSidecar (from P2P or API) that passes all gossip validations on the -/// blob_sidecar_{subnet_id} topic -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlobSidecarEvent { - pub block_root: B256, - #[serde_as(as = "DisplayFromStr")] - pub index: u64, - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub kzg_commitment: Bytes, - pub versioned_hash: B256, -} - -impl PayloadAttributesEvent { - /// Returns the payload attributes - pub fn attributes(&self) -> &PayloadAttributes { - &self.data.payload_attributes - } -} - -/// Data of the event that contains the payload attributes -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct PayloadAttributesData { - /// The slot at which a block using these payload attributes may be built - #[serde_as(as = "DisplayFromStr")] - pub proposal_slot: u64, - /// the beacon block root of the parent block to be built upon. - pub parent_block_root: B256, - /// the execution block number of the parent block. - #[serde_as(as = "DisplayFromStr")] - pub parent_block_number: u64, - /// the execution block hash of the parent block. - pub parent_block_hash: B256, - /// The execution block number of the parent block. - /// the validator index of the proposer at `proposal_slot` on the chain identified by - /// `parent_block_root`. 
- #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - /// Beacon API encoding of `PayloadAttributesV` as defined by the `execution-apis` - /// specification - /// - /// Note: this uses the beacon API format which uses snake-case and quoted decimals rather than - /// big-endian hex. - #[serde(with = "crate::beacon::payload::beacon_api_payload_attributes")] - pub payload_attributes: PayloadAttributes, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_payload_attributes_event() { - let s = r#"{"version":"capella","data":{"proposal_slot":"173332","proposer_index":"649112","parent_block_root":"0x5a49069647f6bf8f25d76b55ce920947654ade4ba1c6ab826d16712dd62b42bf","parent_block_number":"161093","parent_block_hash":"0x608b3d140ecb5bbcd0019711ac3704ece7be8e6d100816a55db440c1bcbb0251","payload_attributes":{"timestamp":"1697982384","prev_randao":"0x3142abd98055871ebf78f0f8e758fd3a04df3b6e34d12d09114f37a737f8f01e","suggested_fee_recipient":"0x0000000000000000000000000000000000000001","withdrawals":[{"index":"2461612","validator_index":"853570","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"45016211"},{"index":"2461613","validator_index":"853571","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5269785"},{"index":"2461614","validator_index":"853572","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5275106"},{"index":"2461615","validator_index":"853573","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5235962"},{"index":"2461616","validator_index":"853574","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5252171"},{"index":"2461617","validator_index":"853575","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5221319"},{"index":"2461618","validator_index":"853576","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5260879"},{"index":"2461619","validator_index":"853577","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5285244"
},{"index":"2461620","validator_index":"853578","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5266681"},{"index":"2461621","validator_index":"853579","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5271322"},{"index":"2461622","validator_index":"853580","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5231327"},{"index":"2461623","validator_index":"853581","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5276761"},{"index":"2461624","validator_index":"853582","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5246244"},{"index":"2461625","validator_index":"853583","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5261011"},{"index":"2461626","validator_index":"853584","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5276477"},{"index":"2461627","validator_index":"853585","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5275319"}]}}}"#; - - let event: PayloadAttributesEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_head_event() { - let s = r#"{"slot":"10", "block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch_transition":false, "previous_duty_dependent_root":"0x5e0043f107cb57913498fbf2f99ff55e730bf1e151f02f221e977c91a90a0e91", "current_duty_dependent_root":"0x5e0043f107cb57913498fbf2f99ff55e730bf1e151f02f221e977c91a90a0e91", "execution_optimistic": false}"#; - - let event: HeadEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_block_event() { - let s = r#"{"slot":"10", 
"block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "execution_optimistic": false}"#; - - let event: BlockEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_attestation_event() { - let s = r#"{"aggregation_bits":"0x01", "signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", "data":{"slot":"1", "index":"1", "beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "source":{"epoch":"1", "root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}, "target":{"epoch":"1", "root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}"#; - - let event: AttestationEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_voluntary_exit_event() { - let s = r#"{"message":{"epoch":"1", "validator_index":"1"}, "signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let event: VoluntaryExitEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_bls_to_execution_change_event() { - let s = r#"{"message":{"validator_index":"1", "from_bls_pubkey":"0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95", "to_execution_address":"0x9be8d619c56699667c1fedcd15f6b14d8b067f72"}, 
"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let event: BlsToExecutionChangeEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_finalize_checkpoint_event() { - let s = r#"{"block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch":"2", "execution_optimistic": false }"#; - - let event: FinalizedCheckpointEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_chain_reorg_event() { - let s = r#"{"slot":"200", "depth":"50", "old_head_block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "new_head_block":"0x76262e91970d375a19bfe8a867288d7b9cde43c8635f598d93d39d041706fc76", "old_head_state":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "new_head_state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch":"2", "execution_optimistic": false}"#; - - let event: ChainReorgEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_contribution_and_proof_event() { - let s = r#"{"message": {"aggregator_index": "997", "contribution": {"slot": "168097", "beacon_block_root": "0x56f1fd4262c08fa81e27621c370e187e621a67fc80fe42340b07519f84b42ea1", "subcommittee_index": "0", "aggregation_bits": "0xffffffffffffffffffffffffffffffff", "signature": 
"0x85ab9018e14963026476fdf784cc674da144b3dbdb47516185438768774f077d882087b90ad642469902e782a8b43eed0cfc1b862aa9a473b54c98d860424a702297b4b648f3f30bdaae8a8b7627d10d04cb96a2cc8376af3e54a9aa0c8145e3"}, "selection_proof": "0x87c305f04bfe5db27c2b19fc23e00d7ac496ec7d3e759cbfdd1035cb8cf6caaa17a36a95a08ba78c282725e7b66a76820ca4eb333822bd399ceeb9807a0f2926c67ce67cfe06a0b0006838203b493505a8457eb79913ce1a3bcd1cc8e4ef30ed"}, "signature": "0xac118511474a94f857300b315c50585c32a713e4452e26a6bb98cdb619936370f126ed3b6bb64469259ee92e69791d9e12d324ce6fd90081680ce72f39d85d50b0ff977260a8667465e613362c6d6e6e745e1f9323ec1d6f16041c4e358839ac"}"#; - - let event: ContributionAndProofEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_light_client_finality_update_event() { - let s = r#"{"version":"phase0", "data": {"attested_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "finalized_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "finality_branch": ["0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", 
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"], "sync_aggregate": {"sync_committee_bits":"0x01", "sync_committee_signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}, "signature_slot":"1"}}"#; - - let event: LightClientFinalityUpdateEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_light_client_optimistic_update_event() { - let s = r#"{"version":"phase0", "data": {"attested_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "sync_aggregate": {"sync_committee_bits":"0x01", "sync_committee_signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}, "signature_slot":"1"}}"#; - - let event: LightClientOptimisticUpdateEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_blob_sidecar_event() { - let s = r#"{"block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "index": "1", "slot": "1", "kzg_commitment": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", "versioned_hash": 
"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}"#; - - let event: BlobSidecarEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } -} diff --git a/crates/rpc/rpc-types/src/beacon/header.rs b/crates/rpc/rpc-types/src/beacon/header.rs deleted file mode 100644 index 9843d3351..000000000 --- a/crates/rpc/rpc-types/src/beacon/header.rs +++ /dev/null @@ -1,125 +0,0 @@ -//! Beacon block header types. -//! -//! See also - -use alloy_primitives::{Bytes, B256}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -/// The response to a request for beacon block headers: `getBlockHeaders` -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeadersResponse { - /// True if the response references an unverified execution payload. Optimistic information may - /// be invalidated at a later time. If the field is not present, assume the False value. - pub execution_optimistic: bool, - /// True if the response references the finalized history of the chain, as determined by fork - /// choice. If the field is not present, additional calls are necessary to compare the epoch of - /// the requested information with the finalized checkpoint. - pub finalized: bool, - /// Container for the header data. - pub data: Vec, -} - -/// The response to a request for a __single__ beacon block header: `headers/{id}` -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeaderResponse { - /// True if the response references an unverified execution payload. Optimistic information may - /// be invalidated at a later time. If the field is not present, assume the False value. - pub execution_optimistic: bool, - /// True if the response references the finalized history of the chain, as determined by fork - /// choice. 
If the field is not present, additional calls are necessary to compare the epoch of - /// the requested information with the finalized checkpoint. - pub finalized: bool, - /// Container for the header data. - pub data: HeaderData, -} - -/// Container type for a beacon block header. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeaderData { - /// root hash of the block - pub root: B256, - /// Whether the block is part of the canonical chain - pub canonical: bool, - /// The `SignedBeaconBlockHeader` object envelope from the CL spec. - pub header: Header, -} - -/// [BeaconBlockHeader] with a signature. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Header { - /// The `BeaconBlockHeader` object from the CL spec. - pub message: BeaconBlockHeader, - pub signature: Bytes, -} - -/// The header of a beacon block. -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BeaconBlockHeader { - /// The slot to which this block corresponds. - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - /// Index of validator in validator registry. - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - /// The signing merkle root of the parent BeaconBlock. - pub parent_root: B256, - /// The tree hash merkle root of the BeaconState for the BeaconBlock. 
- pub state_root: B256, - /// The tree hash merkle root of the BeaconBlockBody for the BeaconBlock - pub body_root: B256, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_headers_response() { - let s = r#"{ - "execution_optimistic": false, - "finalized": false, - "data": [ - { - "root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "canonical": true, - "header": { - "message": { - "slot": "1", - "proposer_index": "1", - "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2" - }, - "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" - } - } - ] -}"#; - let _header_response: HeadersResponse = serde_json::from_str(s).unwrap(); - } - - #[test] - fn serde_header_response() { - let s = r#"{ - "execution_optimistic": false, - "finalized": false, - "data": { - "root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "canonical": true, - "header": { - "message": { - "slot": "1", - "proposer_index": "1", - "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2" - }, - "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" - } - } -}"#; - let _header_response: HeaderResponse = serde_json::from_str(s).unwrap(); - } -} diff --git a/crates/rpc/rpc-types/src/beacon/mod.rs b/crates/rpc/rpc-types/src/beacon/mod.rs 
deleted file mode 100644 index 1184d2e43..000000000 --- a/crates/rpc/rpc-types/src/beacon/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! Types for the Ethereum 2.0 RPC protocol (beacon chain). - -#![allow(missing_docs)] - -use alloy_primitives::FixedBytes; -use constants::{BLS_PUBLIC_KEY_BYTES_LEN, BLS_SIGNATURE_BYTES_LEN}; - -pub mod constants; -/// Beacon API events support. -pub mod events; -pub mod header; -pub mod payload; -pub mod withdrawals; - -/// BLS signature type -pub type BlsSignature = FixedBytes; - -/// BLS public key type -pub type BlsPublicKey = FixedBytes; diff --git a/crates/rpc/rpc-types/src/beacon/payload.rs b/crates/rpc/rpc-types/src/beacon/payload.rs deleted file mode 100644 index 2bc4cde78..000000000 --- a/crates/rpc/rpc-types/src/beacon/payload.rs +++ /dev/null @@ -1,569 +0,0 @@ -//! Payload support for the beacon API. -//! -//! Internal helper module to deserialize/serialize the payload attributes for the beacon API, which -//! uses snake case and quoted decimals. -//! -//! This is necessary because we don't want to allow a mixture of both formats, hence `serde` -//! aliases are not an option. -//! -//! 
See also - -#![allow(missing_docs)] - -use crate::{ - beacon::{withdrawals::BeaconWithdrawal, BlsPublicKey}, - engine::{ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3}, - Withdrawal, -}; -use alloy_primitives::{Address, Bloom, Bytes, B256, U256}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde_with::{serde_as, DeserializeAs, DisplayFromStr, SerializeAs}; -use std::borrow::Cow; - -/// Response object of GET `/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}` -/// -/// See also -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct GetExecutionPayloadHeaderResponse { - pub version: String, - pub data: ExecutionPayloadHeaderData, -} - -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExecutionPayloadHeaderData { - pub message: ExecutionPayloadHeaderMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExecutionPayloadHeaderMessage { - pub header: ExecutionPayloadHeader, - #[serde_as(as = "DisplayFromStr")] - pub value: U256, - pub pubkey: BlsPublicKey, -} - -/// The header of the execution payload. 
-#[serde_as] -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExecutionPayloadHeader { - pub parent_hash: B256, - pub fee_recipient: Address, - pub state_root: B256, - pub receipts_root: B256, - pub logs_bloom: Bloom, - pub prev_randao: B256, - #[serde_as(as = "DisplayFromStr")] - pub block_number: String, - #[serde_as(as = "DisplayFromStr")] - pub gas_limit: u64, - #[serde_as(as = "DisplayFromStr")] - pub gas_used: u64, - #[serde_as(as = "DisplayFromStr")] - pub timestamp: u64, - pub extra_data: Bytes, - #[serde_as(as = "DisplayFromStr")] - pub base_fee_per_gas: U256, - pub block_hash: B256, - pub transactions_root: B256, -} - -#[serde_as] -#[derive(Serialize, Deserialize)] -struct BeaconPayloadAttributes { - #[serde_as(as = "DisplayFromStr")] - timestamp: u64, - prev_randao: B256, - suggested_fee_recipient: Address, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde_as(as = "Option>")] - withdrawals: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - parent_beacon_block_root: Option, -} - -/// Optimism Payload Attributes -#[serde_as] -#[derive(Serialize, Deserialize)] -struct BeaconOptimismPayloadAttributes { - #[serde(flatten)] - payload_attributes: BeaconPayloadAttributes, - #[serde(default, skip_serializing_if = "Option::is_none")] - transactions: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - no_tx_pool: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - #[serde_as(as = "Option")] - gas_limit: Option, -} - -/// A helper module for serializing and deserializing optimism payload attributes for the beacon -/// API. -/// -/// See docs for [beacon_api_payload_attributes]. -pub mod beacon_api_payload_attributes_optimism { - use super::*; - use crate::engine::{OptimismPayloadAttributes, PayloadAttributes}; - - /// Serialize the payload attributes for the beacon API. 
- pub fn serialize( - payload_attributes: &OptimismPayloadAttributes, - serializer: S, - ) -> Result - where - S: Serializer, - { - let beacon_api_payload_attributes = BeaconPayloadAttributes { - timestamp: payload_attributes.payload_attributes.timestamp, - prev_randao: payload_attributes.payload_attributes.prev_randao, - suggested_fee_recipient: payload_attributes.payload_attributes.suggested_fee_recipient, - withdrawals: payload_attributes.payload_attributes.withdrawals.clone(), - parent_beacon_block_root: payload_attributes - .payload_attributes - .parent_beacon_block_root, - }; - - let op_beacon_api_payload_attributes = BeaconOptimismPayloadAttributes { - payload_attributes: beacon_api_payload_attributes, - transactions: payload_attributes.transactions.clone(), - no_tx_pool: payload_attributes.no_tx_pool, - gas_limit: payload_attributes.gas_limit, - }; - - op_beacon_api_payload_attributes.serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let beacon_api_payload_attributes = - BeaconOptimismPayloadAttributes::deserialize(deserializer)?; - Ok(OptimismPayloadAttributes { - payload_attributes: PayloadAttributes { - timestamp: beacon_api_payload_attributes.payload_attributes.timestamp, - prev_randao: beacon_api_payload_attributes.payload_attributes.prev_randao, - suggested_fee_recipient: beacon_api_payload_attributes - .payload_attributes - .suggested_fee_recipient, - withdrawals: beacon_api_payload_attributes.payload_attributes.withdrawals, - parent_beacon_block_root: beacon_api_payload_attributes - .payload_attributes - .parent_beacon_block_root, - }, - transactions: beacon_api_payload_attributes.transactions, - no_tx_pool: beacon_api_payload_attributes.no_tx_pool, - gas_limit: beacon_api_payload_attributes.gas_limit, - }) - } -} - -/// A helper module for serializing and deserializing the payload attributes for the beacon API. 
-/// -/// The beacon API encoded object has equivalent fields to the -/// [PayloadAttributes](crate::engine::PayloadAttributes) with two differences: -/// 1) `snake_case` identifiers must be used rather than `camelCase`; -/// 2) integers must be encoded as quoted decimals rather than big-endian hex. -pub mod beacon_api_payload_attributes { - use super::*; - use crate::engine::PayloadAttributes; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &PayloadAttributes, - serializer: S, - ) -> Result - where - S: Serializer, - { - let beacon_api_payload_attributes = BeaconPayloadAttributes { - timestamp: payload_attributes.timestamp, - prev_randao: payload_attributes.prev_randao, - suggested_fee_recipient: payload_attributes.suggested_fee_recipient, - withdrawals: payload_attributes.withdrawals.clone(), - parent_beacon_block_root: payload_attributes.parent_beacon_block_root, - }; - beacon_api_payload_attributes.serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let beacon_api_payload_attributes = BeaconPayloadAttributes::deserialize(deserializer)?; - Ok(PayloadAttributes { - timestamp: beacon_api_payload_attributes.timestamp, - prev_randao: beacon_api_payload_attributes.prev_randao, - suggested_fee_recipient: beacon_api_payload_attributes.suggested_fee_recipient, - withdrawals: beacon_api_payload_attributes.withdrawals, - parent_beacon_block_root: beacon_api_payload_attributes.parent_beacon_block_root, - }) - } -} - -#[serde_as] -#[derive(Debug, Serialize, Deserialize)] -struct BeaconExecutionPayloadV1<'a> { - parent_hash: Cow<'a, B256>, - fee_recipient: Cow<'a, Address>, - state_root: Cow<'a, B256>, - receipts_root: Cow<'a, B256>, - logs_bloom: Cow<'a, Bloom>, - prev_randao: Cow<'a, B256>, - #[serde_as(as = "DisplayFromStr")] - block_number: u64, - #[serde_as(as = "DisplayFromStr")] - gas_limit: u64, - #[serde_as(as = "DisplayFromStr")] - gas_used: u64, - #[serde_as(as = "DisplayFromStr")] - timestamp: u64, - extra_data: Cow<'a, Bytes>, - #[serde_as(as = "DisplayFromStr")] - base_fee_per_gas: U256, - block_hash: Cow<'a, B256>, - transactions: Cow<'a, Vec>, -} - -impl<'a> From> for ExecutionPayloadV1 { - fn from(payload: BeaconExecutionPayloadV1<'a>) -> Self { - let BeaconExecutionPayloadV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = payload; - ExecutionPayloadV1 { - parent_hash: parent_hash.into_owned(), - fee_recipient: fee_recipient.into_owned(), - state_root: state_root.into_owned(), - receipts_root: receipts_root.into_owned(), - logs_bloom: logs_bloom.into_owned(), - prev_randao: prev_randao.into_owned(), - block_number, - gas_limit, - gas_used, - timestamp, - extra_data: extra_data.into_owned(), - base_fee_per_gas, - block_hash: block_hash.into_owned(), 
- transactions: transactions.into_owned(), - } - } -} - -impl<'a> From<&'a ExecutionPayloadV1> for BeaconExecutionPayloadV1<'a> { - fn from(value: &'a ExecutionPayloadV1) -> Self { - let ExecutionPayloadV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = value; - - BeaconExecutionPayloadV1 { - parent_hash: Cow::Borrowed(parent_hash), - fee_recipient: Cow::Borrowed(fee_recipient), - state_root: Cow::Borrowed(state_root), - receipts_root: Cow::Borrowed(receipts_root), - logs_bloom: Cow::Borrowed(logs_bloom), - prev_randao: Cow::Borrowed(prev_randao), - block_number: *block_number, - gas_limit: *gas_limit, - gas_used: *gas_used, - timestamp: *timestamp, - extra_data: Cow::Borrowed(extra_data), - base_fee_per_gas: *base_fee_per_gas, - block_hash: Cow::Borrowed(block_hash), - transactions: Cow::Borrowed(transactions), - } - } -} - -/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than -/// big-endian hex. -pub mod beacon_payload_v1 { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &ExecutionPayloadV1, - serializer: S, - ) -> Result - where - S: Serializer, - { - BeaconExecutionPayloadV1::from(payload_attributes).serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayloadV1::deserialize(deserializer).map(Into::into) - } -} - -#[serde_as] -#[derive(Debug, Serialize, Deserialize)] -struct BeaconExecutionPayloadV2<'a> { - /// Inner V1 payload - #[serde(flatten)] - payload_inner: BeaconExecutionPayloadV1<'a>, - /// Array of [`Withdrawal`] enabled with V2 - /// See - #[serde_as(as = "Vec")] - withdrawals: Vec, -} - -impl<'a> From> for ExecutionPayloadV2 { - fn from(payload: BeaconExecutionPayloadV2<'a>) -> Self { - let BeaconExecutionPayloadV2 { payload_inner, withdrawals } = payload; - ExecutionPayloadV2 { payload_inner: payload_inner.into(), withdrawals } - } -} - -impl<'a> From<&'a ExecutionPayloadV2> for BeaconExecutionPayloadV2<'a> { - fn from(value: &'a ExecutionPayloadV2) -> Self { - let ExecutionPayloadV2 { payload_inner, withdrawals } = value; - BeaconExecutionPayloadV2 { - payload_inner: payload_inner.into(), - withdrawals: withdrawals.clone(), - } - } -} - -/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than -/// big-endian hex. -pub mod beacon_payload_v2 { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &ExecutionPayloadV2, - serializer: S, - ) -> Result - where - S: Serializer, - { - BeaconExecutionPayloadV2::from(payload_attributes).serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayloadV2::deserialize(deserializer).map(Into::into) - } -} - -#[serde_as] -#[derive(Debug, Serialize, Deserialize)] -struct BeaconExecutionPayloadV3<'a> { - /// Inner V1 payload - #[serde(flatten)] - payload_inner: BeaconExecutionPayloadV2<'a>, - #[serde_as(as = "DisplayFromStr")] - blob_gas_used: u64, - #[serde_as(as = "DisplayFromStr")] - excess_blob_gas: u64, -} - -impl<'a> From> for ExecutionPayloadV3 { - fn from(payload: BeaconExecutionPayloadV3<'a>) -> Self { - let BeaconExecutionPayloadV3 { payload_inner, blob_gas_used, excess_blob_gas } = payload; - ExecutionPayloadV3 { payload_inner: payload_inner.into(), blob_gas_used, excess_blob_gas } - } -} - -impl<'a> From<&'a ExecutionPayloadV3> for BeaconExecutionPayloadV3<'a> { - fn from(value: &'a ExecutionPayloadV3) -> Self { - let ExecutionPayloadV3 { payload_inner, blob_gas_used, excess_blob_gas } = value; - BeaconExecutionPayloadV3 { - payload_inner: payload_inner.into(), - blob_gas_used: *blob_gas_used, - excess_blob_gas: *excess_blob_gas, - } - } -} - -/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than -/// big-endian hex. -pub mod beacon_payload_v3 { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &ExecutionPayloadV3, - serializer: S, - ) -> Result - where - S: Serializer, - { - BeaconExecutionPayloadV3::from(payload_attributes).serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayloadV3::deserialize(deserializer).map(Into::into) - } -} - -/// Represents all possible payload versions. 
-#[derive(Debug, Serialize)] -#[serde(untagged)] -enum BeaconExecutionPayload<'a> { - /// V1 payload - V1(BeaconExecutionPayloadV1<'a>), - /// V2 payload - V2(BeaconExecutionPayloadV2<'a>), - /// V3 payload - V3(BeaconExecutionPayloadV3<'a>), -} - -// Deserializes untagged ExecutionPayload by trying each variant in falling order -impl<'de> Deserialize<'de> for BeaconExecutionPayload<'de> { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - #[serde(untagged)] - enum BeaconExecutionPayloadDesc<'a> { - V3(BeaconExecutionPayloadV3<'a>), - V2(BeaconExecutionPayloadV2<'a>), - V1(BeaconExecutionPayloadV1<'a>), - } - match BeaconExecutionPayloadDesc::deserialize(deserializer)? { - BeaconExecutionPayloadDesc::V3(payload) => Ok(Self::V3(payload)), - BeaconExecutionPayloadDesc::V2(payload) => Ok(Self::V2(payload)), - BeaconExecutionPayloadDesc::V1(payload) => Ok(Self::V1(payload)), - } - } -} - -impl<'a> From> for ExecutionPayload { - fn from(payload: BeaconExecutionPayload<'a>) -> Self { - match payload { - BeaconExecutionPayload::V1(payload) => { - ExecutionPayload::V1(ExecutionPayloadV1::from(payload)) - } - BeaconExecutionPayload::V2(payload) => { - ExecutionPayload::V2(ExecutionPayloadV2::from(payload)) - } - BeaconExecutionPayload::V3(payload) => { - ExecutionPayload::V3(ExecutionPayloadV3::from(payload)) - } - } - } -} - -impl<'a> From<&'a ExecutionPayload> for BeaconExecutionPayload<'a> { - fn from(value: &'a ExecutionPayload) -> Self { - match value { - ExecutionPayload::V1(payload) => { - BeaconExecutionPayload::V1(BeaconExecutionPayloadV1::from(payload)) - } - ExecutionPayload::V2(payload) => { - BeaconExecutionPayload::V2(BeaconExecutionPayloadV2::from(payload)) - } - ExecutionPayload::V3(payload) => { - BeaconExecutionPayload::V3(BeaconExecutionPayloadV3::from(payload)) - } - ExecutionPayload::V4(_payload) => { - // TODO(onbjerg): Implement `ExecutionPayloadV4` support - todo!() - } - } - } -} - -impl<'a> 
SerializeAs for BeaconExecutionPayload<'a> { - fn serialize_as(source: &ExecutionPayload, serializer: S) -> Result - where - S: Serializer, - { - beacon_payload::serialize(source, serializer) - } -} - -impl<'de> DeserializeAs<'de, ExecutionPayload> for BeaconExecutionPayload<'de> { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - beacon_payload::deserialize(deserializer) - } -} - -pub mod beacon_payload { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize( - payload_attributes: &ExecutionPayload, - serializer: S, - ) -> Result - where - S: Serializer, - { - BeaconExecutionPayload::from(payload_attributes).serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayload::deserialize(deserializer).map(Into::into) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_get_payload_header_response() { - let s = 
r#"{"version":"bellatrix","data":{"message":{"header":{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"value":"1","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}"#; - let resp: GetExecutionPayloadHeaderResponse = serde_json::from_str(s).unwrap(); - let json: serde_json::Value = serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(resp).unwrap()); - } - - #[test] - fn serde_payload_header() { - let s = 
r#"{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}"#; - let header: ExecutionPayloadHeader = serde_json::from_str(s).unwrap(); - let json: serde_json::Value = serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(header).unwrap()); - } -} diff --git a/crates/rpc/rpc-types/src/beacon/withdrawals.rs b/crates/rpc/rpc-types/src/beacon/withdrawals.rs deleted file mode 100644 index ea2930c5f..000000000 --- a/crates/rpc/rpc-types/src/beacon/withdrawals.rs +++ /dev/null @@ -1,70 +0,0 @@ -use crate::Withdrawal; -use alloy_primitives::Address; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use serde_with::{serde_as, DeserializeAs, DisplayFromStr, SerializeAs}; - -/// Same as [Withdrawal] but respects the Beacon API format which uses 
snake-case and quoted -/// decimals. -#[serde_as] -#[derive(Serialize, Deserialize, Clone)] -pub(crate) struct BeaconWithdrawal { - #[serde_as(as = "DisplayFromStr")] - index: u64, - #[serde_as(as = "DisplayFromStr")] - validator_index: u64, - address: Address, - #[serde_as(as = "DisplayFromStr")] - amount: u64, -} - -impl SerializeAs for BeaconWithdrawal { - fn serialize_as(source: &Withdrawal, serializer: S) -> Result - where - S: Serializer, - { - beacon_withdrawals::serialize(source, serializer) - } -} - -impl<'de> DeserializeAs<'de, Withdrawal> for BeaconWithdrawal { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - beacon_withdrawals::deserialize(deserializer) - } -} - -/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than -/// big-endian hex. -pub mod beacon_withdrawals { - use super::*; - - /// Serialize the payload attributes for the beacon API. - pub fn serialize(payload_attributes: &Withdrawal, serializer: S) -> Result - where - S: Serializer, - { - let withdrawal = BeaconWithdrawal { - index: payload_attributes.index, - validator_index: payload_attributes.validator_index, - address: payload_attributes.address, - amount: payload_attributes.amount, - }; - withdrawal.serialize(serializer) - } - - /// Deserialize the payload attributes for the beacon API. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let withdrawal = BeaconWithdrawal::deserialize(deserializer)?; - Ok(Withdrawal { - index: withdrawal.index, - validator_index: withdrawal.validator_index, - address: withdrawal.address, - amount: withdrawal.amount, - }) - } -} diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 01ed0f911..5966a9b72 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -10,7 +10,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -pub mod beacon; mod eth; mod mev; mod net; diff --git a/crates/rpc/rpc-types/src/relay/mod.rs b/crates/rpc/rpc-types/src/relay/mod.rs index 8fed94b79..35daa1b79 100644 --- a/crates/rpc/rpc-types/src/relay/mod.rs +++ b/crates/rpc/rpc-types/src/relay/mod.rs @@ -1,12 +1,10 @@ //! Relay API bindings: -use crate::{ - beacon::{BlsPublicKey, BlsSignature}, - engine::{ - BlobsBundleV1, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, - }, +use crate::engine::{ + BlobsBundleV1, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, }; use alloy_primitives::{Address, B256, U256}; +use alloy_rpc_types_beacon::beacon::{BlsPublicKey, BlsSignature}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; @@ -104,7 +102,7 @@ pub struct SignedBidSubmissionV1 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v1")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v1")] pub execution_payload: ExecutionPayloadV1, /// The signature associated with the submission. pub signature: BlsSignature, @@ -118,7 +116,7 @@ pub struct SignedBidSubmissionV2 { /// The BidTrace message associated with the submission. 
pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v2")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v2")] pub execution_payload: ExecutionPayloadV2, /// The signature associated with the submission. pub signature: BlsSignature, @@ -132,7 +130,7 @@ pub struct SignedBidSubmissionV3 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v3")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v3")] pub execution_payload: ExecutionPayloadV3, /// The Deneb block bundle for this bid. pub blobs_bundle: BlobsBundleV1, @@ -146,7 +144,7 @@ pub struct SubmitBlockRequest { /// The BidTrace message associated with the block submission. pub message: BidTrace, /// The execution payload for the block submission. - #[serde(with = "crate::beacon::payload::beacon_payload")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload")] pub execution_payload: ExecutionPayload, /// The signature associated with the block submission. 
pub signature: BlsSignature, diff --git a/examples/beacon-api-sse/Cargo.toml b/examples/beacon-api-sse/Cargo.toml index 87a882c6c..4582f2598 100644 --- a/examples/beacon-api-sse/Cargo.toml +++ b/examples/beacon-api-sse/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] reth.workspace = true reth-node-ethereum.workspace = true +alloy-rpc-types-beacon.workspace = true clap.workspace = true tracing.workspace = true diff --git a/examples/beacon-api-sse/src/main.rs b/examples/beacon-api-sse/src/main.rs index 38dada132..0cd4d4e78 100644 --- a/examples/beacon-api-sse/src/main.rs +++ b/examples/beacon-api-sse/src/main.rs @@ -17,10 +17,11 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_rpc_types_beacon::beacon::events::PayloadAttributesEvent; use clap::Parser; use futures_util::stream::StreamExt; use mev_share_sse::{client::EventStream, EventClient}; -use reth::{cli::Cli, rpc::types::beacon::events::PayloadAttributesEvent}; +use reth::cli::Cli; use reth_node_ethereum::EthereumNode; use std::net::{IpAddr, Ipv4Addr}; use tracing::{info, warn}; From 5b4e10cbdcb66dd4b6a20175c31f9b93659774f0 Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> Date: Tue, 7 May 2024 15:56:59 +0530 Subject: [PATCH 217/250] Add helpful text for tx pool flags in reth --help (#8134) --- crates/node-core/src/args/network.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 7b1b9d0d5..9ff93c5a9 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -93,7 +93,7 @@ pub struct NetworkArgs { /// `GetPooledTransactions` request. Spec'd at 2 MiB. /// /// . - #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, help = "Sets the soft limit for the byte size of pooled transactions response. 
Specified at 2 MiB by default. This is a spec'd value that should only be set for experimental purposes on a testnet.")] + #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, help = "Sets the soft limit for the byte size of pooled transactions response. Specified at 2 MiB by default. This is a spec'd value that should only be set for experimental purposes on a testnet.",long_help = None)] pub soft_limit_byte_size_pooled_transactions_response: usize, /// Default soft limit for the byte size of a `PooledTransactions` response on assembling a @@ -101,7 +101,7 @@ pub struct NetworkArgs { /// than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when /// assembling a `PooledTransactions` response. Default /// is 128 KiB. - #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ)] + #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,help = "Sets the soft limit for the byte size of a single pooled transactions response when packing multiple responses into a single packet for a `GetPooledTransactions` request. 
Specified at 128 Kib by default.",long_help = None)] pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, } From cbc6f268c0316cacc68b407d18d7e697399b700e Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> Date: Tue, 7 May 2024 18:08:23 +0530 Subject: [PATCH 218/250] replace reth BlobTransactionSidecar with alloy's (#8135) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 - .../src/commands/debug_cmd/build_block.rs | 2 +- crates/primitives/Cargo.toml | 3 +- crates/primitives/benches/validate_blob_tx.rs | 14 +- crates/primitives/src/eip4844.rs | 16 +- crates/primitives/src/lib.rs | 9 +- crates/primitives/src/transaction/eip4844.rs | 57 +--- crates/primitives/src/transaction/mod.rs | 7 +- crates/primitives/src/transaction/pooled.rs | 2 - crates/primitives/src/transaction/sidecar.rs | 272 ++---------------- crates/transaction-pool/src/blobstore/disk.rs | 15 +- .../transaction-pool/src/test_utils/mock.rs | 7 +- examples/exex/rollup/src/execution.rs | 7 +- 13 files changed, 60 insertions(+), 352 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 866a38167..2ee45d88a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7583,7 +7583,6 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "sha2 0.10.8", "strum", "sucds", "tempfile", diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 22361aada..dd0bfa092 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -213,7 +213,7 @@ impl Command { ))?; let sidecar: BlobTransactionSidecar = - blobs_bundle.pop_sidecar(blob_versioned_hashes.len()).into(); + blobs_bundle.pop_sidecar(blob_versioned_hashes.len()); // first construct the tx, calculating the length of the tx with sidecar before // insertion diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 675c7167f..f4be57f9c 100644 --- a/crates/primitives/Cargo.toml +++ 
b/crates/primitives/Cargo.toml @@ -45,7 +45,6 @@ once_cell.workspace = true rayon.workspace = true serde.workspace = true serde_json.workspace = true -sha2 = { version = "0.10.7", optional = true } tempfile = { workspace = true, optional = true } thiserror.workspace = true zstd = { version = "0.13", features = ["experimental"], optional = true } @@ -105,7 +104,7 @@ arbitrary = [ "dep:proptest-derive", "zstd-codec", ] -c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:sha2", "dep:tempfile"] +c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:tempfile", "alloy-eips/kzg"] zstd-codec = ["dep:zstd"] clap = ["dep:clap"] optimism = [ diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 0bc2f04c6..ec62353fb 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,6 +1,7 @@ #![allow(missing_docs)] + use alloy_primitives::hex; -use c_kzg::{KzgCommitment, KzgSettings}; +use c_kzg::KzgSettings; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, }; @@ -10,8 +11,7 @@ use proptest::{ test_runner::{RngAlgorithm, TestRng, TestRunner}, }; use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, eip4844::kzg_to_versioned_hash, - BlobTransactionSidecar, TxEip4844, + constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, BlobTransactionSidecar, TxEip4844, }; use revm_primitives::MAX_BLOB_NUMBER_PER_BLOCK; use std::sync::Arc; @@ -62,13 +62,7 @@ fn validate_blob_tx( } } - tx.blob_versioned_hashes = blob_sidecar - .commitments - .iter() - .map(|commitment| { - kzg_to_versioned_hash(KzgCommitment::from_bytes(&commitment.into_inner()).unwrap()) - }) - .collect(); + tx.blob_versioned_hashes = blob_sidecar.versioned_hashes().collect(); (tx, blob_sidecar) }; diff --git a/crates/primitives/src/eip4844.rs b/crates/primitives/src/eip4844.rs index 4f65cc7ee..0d228528f 100644 --- 
a/crates/primitives/src/eip4844.rs +++ b/crates/primitives/src/eip4844.rs @@ -1,21 +1,9 @@ //! Helpers for working with EIP-4844 blob fee. -#[cfg(feature = "c-kzg")] -use crate::{constants::eip4844::VERSIONED_HASH_VERSION_KZG, B256}; -#[cfg(feature = "c-kzg")] -use sha2::{Digest, Sha256}; - // re-exports from revm for calculating blob fee pub use crate::revm_primitives::{ calc_blob_gasprice, calc_excess_blob_gas as calculate_excess_blob_gas, }; -/// Calculates the versioned hash for a KzgCommitment -/// -/// Specified in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension) -#[cfg(feature = "c-kzg")] -pub fn kzg_to_versioned_hash(commitment: c_kzg::KzgCommitment) -> B256 { - let mut res = Sha256::digest(commitment.as_slice()); - res[0] = VERSIONED_HASH_VERSION_KZG; - B256::new(res.into()) -} +#[doc(inline)] +pub use alloy_eips::eip4844::kzg_to_versioned_hash; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 3473ef82e..2cd71ae20 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -85,13 +85,14 @@ pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts}; pub use static_file::StaticFileSegment; pub use storage::StorageEntry; -#[cfg(feature = "c-kzg")] pub use transaction::{ - BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError, - FromRecoveredPooledTransaction, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, + BlobTransaction, BlobTransactionSidecar, FromRecoveredPooledTransaction, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, }; +#[cfg(feature = "c-kzg")] +pub use transaction::BlobTransactionValidationError; + pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, AccessList, AccessListItem, IntoRecoveredTransaction, InvalidTransactionError, Signature, diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 
8356d6788..f2130ce50 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -8,13 +8,7 @@ use reth_codecs::{main_codec, Compact}; use std::mem; #[cfg(feature = "c-kzg")] -use crate::eip4844::kzg_to_versioned_hash; -#[cfg(feature = "c-kzg")] -use crate::kzg::{self, KzgCommitment, KzgProof, KzgSettings}; -#[cfg(feature = "c-kzg")] -use crate::transaction::sidecar::*; -#[cfg(feature = "c-kzg")] -use std::ops::Deref; +use crate::kzg::KzgSettings; /// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) /// @@ -112,57 +106,16 @@ impl TxEip4844 { /// commitments, and proofs. Each blob data element is verified against its commitment and /// proof. /// - /// Returns [BlobTransactionValidationError::InvalidProof] if any blob KZG proof in the response + /// Returns `InvalidProof` if any blob KZG proof in the response /// fails to verify, or if the versioned hashes in the transaction do not match the actual /// commitment versioned hashes. 
#[cfg(feature = "c-kzg")] pub fn validate_blob( &self, - sidecar: &BlobTransactionSidecar, + sidecar: &crate::BlobTransactionSidecar, proof_settings: &KzgSettings, - ) -> Result<(), BlobTransactionValidationError> { - // Ensure the versioned hashes and commitments have the same length - if self.blob_versioned_hashes.len() != sidecar.commitments.len() { - return Err(kzg::Error::MismatchLength(format!( - "There are {} versioned commitment hashes and {} commitments", - self.blob_versioned_hashes.len(), - sidecar.commitments.len() - )) - .into()) - } - - // zip and iterate, calculating versioned hashes - for (versioned_hash, commitment) in - self.blob_versioned_hashes.iter().zip(sidecar.commitments.iter()) - { - // convert to KzgCommitment - let commitment = KzgCommitment::from(*commitment.deref()); - - // calculate & verify the versioned hash - // https://eips.ethereum.org/EIPS/eip-4844#execution-layer-validation - let calculated_versioned_hash = kzg_to_versioned_hash(commitment); - if *versioned_hash != calculated_versioned_hash { - return Err(BlobTransactionValidationError::WrongVersionedHash { - have: *versioned_hash, - expected: calculated_versioned_hash, - }) - } - } - - // Verify as a batch - let res = KzgProof::verify_blob_kzg_proof_batch( - sidecar.blobs.as_slice(), - sidecar.commitments.as_slice(), - sidecar.proofs.as_slice(), - proof_settings, - ) - .map_err(BlobTransactionValidationError::KZGError)?; - - if res { - Ok(()) - } else { - Err(BlobTransactionValidationError::InvalidProof) - } + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { + sidecar.validate(&self.blob_versioned_hashes, proof_settings) } /// Returns the total gas for all blobs in this transaction. 
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 7b79a85a2..95407537b 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -25,12 +25,12 @@ pub use error::{ }; pub use legacy::TxLegacy; pub use meta::TransactionMeta; -#[cfg(feature = "c-kzg")] pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; #[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] pub use sidecar::generate_blob_sidecar; #[cfg(feature = "c-kzg")] -pub use sidecar::{BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError}; +pub use sidecar::BlobTransactionValidationError; +pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; pub use signature::{extract_chain_id, Signature}; pub use tx_type::{ @@ -45,9 +45,7 @@ mod eip4844; mod error; mod legacy; mod meta; -#[cfg(feature = "c-kzg")] mod pooled; -#[cfg(feature = "c-kzg")] mod sidecar; mod signature; mod tx_type; @@ -1698,7 +1696,6 @@ impl TryFromRecoveredTransaction for TransactionSignedEcRecovered { /// /// This is a conversion trait that'll ensure transactions received via P2P can be converted to the /// transaction type that the transaction pool uses. -#[cfg(feature = "c-kzg")] pub trait FromRecoveredPooledTransaction { /// Converts to this type from the given [`PooledTransactionsElementEcRecovered`]. fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self; diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 5588d45a7..8323de470 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,8 +1,6 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. 
-#![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] - use super::error::TransactionConversionError; use crate::{ Address, BlobTransaction, BlobTransactionSidecar, Bytes, Signature, Transaction, diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 4c2751a86..b4c82b35a 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,48 +1,16 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] -#[cfg(any(test, feature = "arbitrary"))] use crate::{ - constants::eip4844::{FIELD_ELEMENTS_PER_BLOB, MAINNET_KZG_TRUSTED_SETUP}, - kzg::{KzgCommitment, KzgProof, BYTES_PER_FIELD_ELEMENT}, -}; -use crate::{ - keccak256, - kzg::{ - self, Blob, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_PROOF, - }, - Signature, Transaction, TransactionSigned, TxEip4844, TxHash, B256, EIP4844_TX_TYPE_ID, + keccak256, Signature, Transaction, TransactionSigned, TxEip4844, TxHash, EIP4844_TX_TYPE_ID, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; -use bytes::BufMut; -#[cfg(any(test, feature = "arbitrary"))] -use proptest::{ - arbitrary::{any as proptest_any, ParamsFor}, - collection::vec as proptest_vec, - strategy::{BoxedStrategy, Strategy}, -}; use serde::{Deserialize, Serialize}; -/// An error that can occur when validating a [BlobTransaction]. -#[derive(Debug, thiserror::Error)] -pub enum BlobTransactionValidationError { - /// Proof validation failed. - #[error("invalid KZG proof")] - InvalidProof, - /// An error returned by [`kzg`]. - #[error("KZG error: {0:?}")] - KZGError(#[from] kzg::Error), - /// The inner transaction is not a blob transaction. - #[error("unable to verify proof for non blob transaction: {0}")] - NotBlobTransaction(u8), - /// The versioned hash is incorrect. 
- #[error("wrong versioned hash: have {have}, expected {expected}")] - WrongVersionedHash { - /// The versioned hash we got - have: B256, - /// The versioned hash we expected - expected: B256, - }, -} +#[doc(inline)] +pub use alloy_eips::eip4844::BlobTransactionSidecar; + +#[cfg(feature = "c-kzg")] +pub use alloy_eips::eip4844::BlobTransactionValidationError; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. @@ -83,9 +51,10 @@ impl BlobTransaction { /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// /// See also [TxEip4844::validate_blob] + #[cfg(feature = "c-kzg")] pub fn validate( &self, - proof_settings: &KzgSettings, + proof_settings: &c_kzg::KzgSettings, ) -> Result<(), BlobTransactionValidationError> { self.transaction.validate_blob(&self.sidecar, proof_settings) } @@ -168,7 +137,7 @@ impl BlobTransaction { self.signature.encode(out); // Encode the blobs, commitments, and proofs - self.sidecar.encode_inner(out); + self.sidecar.encode(out); } /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte, @@ -274,7 +243,7 @@ impl BlobTransaction { } // All that's left are the blobs, commitments, and proofs - let sidecar = BlobTransactionSidecar::decode_inner(data)?; + let sidecar = BlobTransactionSidecar::decode(data)?; // # Calculating the hash // @@ -306,204 +275,21 @@ impl BlobTransaction { } } -/// This represents a set of blobs, and its corresponding commitments and proofs. -#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] -#[repr(C)] -pub struct BlobTransactionSidecar { - /// The blob data. - pub blobs: Vec, - /// The blob commitments. - pub commitments: Vec, - /// The blob proofs. - pub proofs: Vec, -} - -impl BlobTransactionSidecar { - /// Creates a new [BlobTransactionSidecar] using the given blobs, commitments, and proofs. 
- pub fn new(blobs: Vec, commitments: Vec, proofs: Vec) -> Self { - Self { blobs, commitments, proofs } - } - - /// Encodes the inner [BlobTransactionSidecar] fields as RLP bytes, without a RLP header. - /// - /// This encodes the fields in the following order: - /// - `blobs` - /// - `commitments` - /// - `proofs` - #[inline] - pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) { - BlobTransactionSidecarRlp::wrap_ref(self).encode(out); - } - - /// Outputs the RLP length of the [BlobTransactionSidecar] fields, without a RLP header. - pub fn fields_len(&self) -> usize { - BlobTransactionSidecarRlp::wrap_ref(self).fields_len() - } - - /// Decodes the inner [BlobTransactionSidecar] fields from RLP bytes, without a RLP header. - /// - /// This decodes the fields in the following order: - /// - `blobs` - /// - `commitments` - /// - `proofs` - #[inline] - pub(crate) fn decode_inner(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(BlobTransactionSidecarRlp::decode(buf)?.unwrap()) - } - - /// Calculates a size heuristic for the in-memory size of the [BlobTransactionSidecar]. - #[inline] - pub fn size(&self) -> usize { - self.blobs.len() * BYTES_PER_BLOB + // blobs - self.commitments.len() * BYTES_PER_COMMITMENT + // commitments - self.proofs.len() * BYTES_PER_PROOF // proofs - } -} - -impl From for BlobTransactionSidecar { - fn from(value: reth_rpc_types::BlobTransactionSidecar) -> Self { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(value) } - } -} - -impl From for reth_rpc_types::BlobTransactionSidecar { - fn from(value: BlobTransactionSidecar) -> Self { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(value) } - } -} - -impl Encodable for BlobTransactionSidecar { - /// Encodes the inner [BlobTransactionSidecar] fields as RLP bytes, without a RLP header. 
- fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out) - } - - fn length(&self) -> usize { - self.fields_len() - } -} - -impl Decodable for BlobTransactionSidecar { - /// Decodes the inner [BlobTransactionSidecar] fields from RLP bytes, without a RLP header. - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Self::decode_inner(buf) - } -} - -// Wrapper for c-kzg rlp -#[repr(C)] -struct BlobTransactionSidecarRlp { - blobs: Vec<[u8; BYTES_PER_BLOB]>, - commitments: Vec<[u8; BYTES_PER_COMMITMENT]>, - proofs: Vec<[u8; BYTES_PER_PROOF]>, -} - -const _: [(); std::mem::size_of::()] = - [(); std::mem::size_of::()]; - -const _: [(); std::mem::size_of::()] = - [(); std::mem::size_of::()]; - -impl BlobTransactionSidecarRlp { - fn wrap_ref(other: &BlobTransactionSidecar) -> &Self { - // SAFETY: Same repr and size - unsafe { &*(other as *const BlobTransactionSidecar).cast::() } - } - - fn unwrap(self) -> BlobTransactionSidecar { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(self) } - } - - fn encode(&self, out: &mut dyn bytes::BufMut) { - // Encode the blobs, commitments, and proofs - self.blobs.encode(out); - self.commitments.encode(out); - self.proofs.encode(out); - } - - fn fields_len(&self) -> usize { - self.blobs.length() + self.commitments.length() + self.proofs.length() - } - - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Self { - blobs: Decodable::decode(buf)?, - commitments: Decodable::decode(buf)?, - proofs: Decodable::decode(buf)?, - }) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for BlobTransactionSidecar { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let mut arr = [0u8; BYTES_PER_BLOB]; - - // Note: the "fix" for this is kinda pointless. - #[allow(clippy::large_stack_frames)] - let blobs: Vec = (0..u.int_in_range(1..=16)?) 
- .map(|_| { - arr = arbitrary::Arbitrary::arbitrary(u).unwrap(); - - // Ensure that each blob is canonical by ensuring each field element contained in - // the blob is < BLS_MODULUS - for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) { - arr[i * BYTES_PER_FIELD_ELEMENT] = 0; - } - - Blob::from(arr) - }) - .collect(); - - Ok(generate_blob_sidecar(blobs)) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl proptest::arbitrary::Arbitrary for BlobTransactionSidecar { - type Parameters = ParamsFor; - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - proptest_vec(proptest_vec(proptest_any::(), BYTES_PER_BLOB), 1..=5) - .prop_map(move |blobs| { - let blobs = blobs - .into_iter() - .map(|mut blob| { - let mut arr = [0u8; BYTES_PER_BLOB]; - - // Ensure that each blob is canonical by ensuring each field element - // contained in the blob is < BLS_MODULUS - for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) { - blob[i * BYTES_PER_FIELD_ELEMENT] = 0; - } - - arr.copy_from_slice(blob.as_slice()); - arr.into() - }) - .collect(); - - generate_blob_sidecar(blobs) - }) - .boxed() - } - - type Strategy = BoxedStrategy; -} - /// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. 
-#[cfg(any(test, feature = "arbitrary"))] -pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { +#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] +pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { + use crate::constants::eip4844::MAINNET_KZG_TRUSTED_SETUP; + use c_kzg::{KzgCommitment, KzgProof}; + let kzg_settings = MAINNET_KZG_TRUSTED_SETUP.clone(); - let commitments: Vec = blobs + let commitments: Vec = blobs .iter() .map(|blob| KzgCommitment::blob_to_kzg_commitment(&blob.clone(), &kzg_settings).unwrap()) .map(|commitment| commitment.to_bytes()) .collect(); - let proofs: Vec = blobs + let proofs: Vec = blobs .iter() .zip(commitments.iter()) .map(|(blob, commitment)| { @@ -512,18 +298,15 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { .map(|proof| proof.to_bytes()) .collect(); - BlobTransactionSidecar { blobs, commitments, proofs } + BlobTransactionSidecar::from_kzg(blobs, commitments, proofs) } -#[cfg(test)] +#[cfg(all(test, feature = "c-kzg"))] mod tests { - use crate::{ - hex, - kzg::{Blob, Bytes48}, - transaction::sidecar::generate_blob_sidecar, - BlobTransactionSidecar, - }; - use std::{fs, path::PathBuf}; + use super::*; + use crate::{hex, kzg::Blob}; + use alloy_eips::eip4844::Bytes48; + use std::{fs, path::PathBuf, str::FromStr}; #[test] fn test_blob_transaction_sidecar_generation() { @@ -550,7 +333,7 @@ mod tests { assert_eq!( sidecar.commitments, vec![ - Bytes48::from_hex(json_value.get("commitment").unwrap().as_str().unwrap()).unwrap() + Bytes48::from_str(json_value.get("commitment").unwrap().as_str().unwrap()).unwrap() ] ); } @@ -624,7 +407,7 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode_inner(&mut encoded_rlp); + sidecar.encode(&mut encoded_rlp); // Assert the equality between the expected RLP from the JSON and the encoded RLP assert_eq!(json_value.get("rlp").unwrap().as_str().unwrap(), 
hex::encode(&encoded_rlp)); @@ -655,11 +438,10 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode_inner(&mut encoded_rlp); + sidecar.encode(&mut encoded_rlp); // Decode the RLP-encoded data back into a BlobTransactionSidecar - let decoded_sidecar = - BlobTransactionSidecar::decode_inner(&mut encoded_rlp.as_slice()).unwrap(); + let decoded_sidecar = BlobTransactionSidecar::decode(&mut encoded_rlp.as_slice()).unwrap(); // Assert the equality between the original BlobTransactionSidecar and the decoded one assert_eq!(sidecar, decoded_sidecar); diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index ae6ff97b2..5f44c87f5 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -445,7 +445,6 @@ pub enum OpenDiskFileBlobStore { #[cfg(test)] mod tests { use super::*; - use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use std::sync::atomic::Ordering; fn tmp_store() -> (DiskFileBlobStore, tempfile::TempDir) { @@ -455,11 +454,15 @@ mod tests { } fn rng_blobs(num: usize) -> Vec<(TxHash, BlobTransactionSidecar)> { - let mut runner = TestRunner::new(Default::default()); - prop::collection::vec(any::<(TxHash, BlobTransactionSidecar)>(), num) - .new_tree(&mut runner) - .unwrap() - .current() + let mut rng = rand::thread_rng(); + (0..num) + .map(|_| { + let tx = TxHash::random_with(&mut rng); + let blob = + BlobTransactionSidecar { blobs: vec![], commitments: vec![], proofs: vec![] }; + (tx, blob) + }) + .collect() } #[test] diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 17ad1f7c3..948c47109 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -14,7 +14,6 @@ use rand::{ }; use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, 
MIN_PROTOCOL_BASE_FEE}, - eip4844::kzg_to_versioned_hash, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, Bytes, ChainId, FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, @@ -987,11 +986,7 @@ impl From for Transaction { to, value, access_list, - blob_versioned_hashes: sidecar - .commitments - .into_iter() - .map(|commitment| kzg_to_versioned_hash((*commitment).into())) - .collect(), + blob_versioned_hashes: sidecar.versioned_hashes().collect(), max_fee_per_blob_gas, input, }), diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs index f7a98382e..1403833d3 100644 --- a/examples/exex/rollup/src/execution.rs +++ b/examples/exex/rollup/src/execution.rs @@ -1,3 +1,4 @@ +use crate::{db::Database, RollupContract, CHAIN_ID, CHAIN_SPEC}; use alloy_consensus::{Blob, SidecarCoder, SimpleCoder}; use alloy_rlp::Decodable as _; use eyre::OptionExt; @@ -20,8 +21,6 @@ use reth_revm::{ }; use reth_tracing::tracing::debug; -use crate::{db::Database, RollupContract, CHAIN_ID, CHAIN_SPEC}; - /// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle /// state)[BundleState] and list of (receipts)[Receipt]. 
pub async fn execute_block( @@ -154,7 +153,7 @@ async fn decode_transactions( let blobs = blobs .into_iter() // Convert blob KZG commitments to versioned hashes - .map(|(blob, commitment)| (blob, kzg_to_versioned_hash((*commitment).into()))) + .map(|(blob, commitment)| (blob, kzg_to_versioned_hash(commitment.as_slice()))) // Filter only blobs that are present in the block data .filter(|(_, hash)| blob_hashes.contains(hash)) .map(|(blob, _)| Blob::from(*blob)) @@ -461,7 +460,7 @@ mod tests { SidecarBuilder::::from_slice(&encoded_transactions).build()?; let blob_hashes = alloy_rlp::encode(sidecar.versioned_hashes().collect::>()); - let mut mock_transaction = MockTransaction::eip4844_with_sidecar(sidecar.into()); + let mut mock_transaction = MockTransaction::eip4844_with_sidecar(sidecar); let transaction = sign_tx_with_key_pair(key_pair, Transaction::from(mock_transaction.clone())); mock_transaction.set_hash(transaction.hash); From f281bbdccda2e08b1d5db651f25c7735ea1d63ed Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 May 2024 15:29:16 +0200 Subject: [PATCH 219/250] fix(op): stages checkpoints init-state (#8021) --- bin/reth/src/commands/import.rs | 15 ++------ crates/node-core/src/init.rs | 31 +++++++++-------- crates/primitives/src/stage/checkpoints.rs | 40 ++++++++++++++++++++++ crates/primitives/src/stage/id.rs | 11 ++++++ 4 files changed, 70 insertions(+), 27 deletions(-) diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 0d5b24275..3496077ae 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -43,17 +43,6 @@ use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; use tracing::{debug, error, info}; -/// Stages that require state. 
-const STATE_STAGES: &[StageId] = &[ - StageId::Execution, - StageId::MerkleUnwind, - StageId::AccountHashing, - StageId::StorageHashing, - StageId::MerkleExecute, - StageId::IndexStorageHistory, - StageId::IndexAccountHistory, -]; - /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] pub struct ImportCommand { @@ -171,7 +160,7 @@ impl ImportCommand { provider_factory.static_file_provider(), PruneModes::default(), ), - true, + self.no_state, ) .await?; @@ -307,7 +296,7 @@ where config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), ExExManagerHandle::empty(), )) - .disable_all_if(STATE_STAGES, || should_exec), + .disable_all_if(&StageId::STATE_REQUIRED, || should_exec), ) .build(provider_factory, static_file_producer); diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 8a7751e4e..b09e29e53 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -2,23 +2,20 @@ use reth_codecs::Compact; use reth_config::config::EtlConfig; -use reth_db::{ - database::Database, - tables, - transaction::{DbTx, DbTxMut}, -}; +use reth_db::{database::Database, tables, transaction::DbTxMut}; use reth_etl::Collector; use reth_interfaces::{db::DatabaseError, provider::ProviderResult}; use reth_primitives::{ - stage::StageId, Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, - StaticFileSegment, StorageEntry, B256, U256, + stage::{StageCheckpoint, StageId}, + Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, StaticFileSegment, + StorageEntry, B256, U256, }; use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, - ProviderFactory, StaticFileProviderFactory, + ProviderFactory, StageCheckpointWriter, StaticFileProviderFactory, }; use 
reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; use serde::{Deserialize, Serialize}; @@ -114,18 +111,18 @@ pub fn init_genesis(factory: ProviderFactory) -> Result(&tx, &static_file_provider, chain.clone())?; + insert_genesis_header::(tx, &static_file_provider, chain.clone())?; - insert_genesis_state::(&tx, alloc.len(), alloc.iter())?; + insert_genesis_state::(tx, alloc.len(), alloc.iter())?; // insert sync stage - for stage in StageId::ALL.iter() { - tx.put::(stage.to_string(), Default::default())?; + for stage in StageId::ALL { + provider_rw.save_stage_checkpoint(stage, Default::default())?; } - tx.commit()?; + provider_rw.commit()?; static_file_provider.commit()?; Ok(hash) @@ -343,6 +340,11 @@ pub fn init_from_state_dump( ); } + // insert sync stages for stages that require state + for stage in StageId::STATE_REQUIRED { + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?; + } + provider_rw.commit()?; Ok(hash) @@ -524,6 +526,7 @@ mod tests { cursor::DbCursorRO, models::{storage_sharded_key::StorageShardedKey, ShardedKey}, table::{Table, TableRow}, + transaction::DbTx, DatabaseEnv, }; use reth_primitives::{ diff --git a/crates/primitives/src/stage/checkpoints.rs b/crates/primitives/src/stage/checkpoints.rs index 461e15401..d9c10605c 100644 --- a/crates/primitives/src/stage/checkpoints.rs +++ b/crates/primitives/src/stage/checkpoints.rs @@ -6,6 +6,8 @@ use bytes::Buf; use reth_codecs::{main_codec, Compact}; use std::ops::RangeInclusive; +use super::StageId; + /// Saves the progress of Merkle stage. #[derive(Default, Debug, Clone, PartialEq)] pub struct MerkleCheckpoint { @@ -201,6 +203,25 @@ impl StageCheckpoint { self } + /// Sets the block range, if checkpoint uses block range. 
+ pub fn with_block_range(mut self, stage_id: &StageId, from: u64, to: u64) -> Self { + self.stage_checkpoint = Some(match stage_id { + StageId::Execution => StageUnitCheckpoint::Execution(ExecutionCheckpoint::default()), + StageId::AccountHashing => { + StageUnitCheckpoint::Account(AccountHashingCheckpoint::default()) + } + StageId::StorageHashing => { + StageUnitCheckpoint::Storage(StorageHashingCheckpoint::default()) + } + StageId::IndexStorageHistory | StageId::IndexAccountHistory => { + StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint::default()) + } + _ => return self, + }); + _ = self.stage_checkpoint.map(|mut checkpoint| checkpoint.set_block_range(from, to)); + self + } + /// Get the underlying [`EntitiesCheckpoint`], if any, to determine the number of entities /// processed, and the number of total entities to process. pub fn entities(&self) -> Option { @@ -244,6 +265,25 @@ pub enum StageUnitCheckpoint { IndexHistory(IndexHistoryCheckpoint), } +impl StageUnitCheckpoint { + /// Sets the block range. Returns old block range, or `None` if checkpoint doesn't use block + /// range. + pub fn set_block_range(&mut self, from: u64, to: u64) -> Option { + match self { + Self::Account(AccountHashingCheckpoint { ref mut block_range, .. }) | + Self::Storage(StorageHashingCheckpoint { ref mut block_range, .. }) | + Self::Execution(ExecutionCheckpoint { ref mut block_range, .. }) | + Self::IndexHistory(IndexHistoryCheckpoint { ref mut block_range, .. }) => { + let old_range = *block_range; + *block_range = CheckpointBlockRange { from, to }; + + Some(old_range) + } + _ => None, + } + } +} + #[cfg(test)] impl Default for StageUnitCheckpoint { fn default() -> Self { diff --git a/crates/primitives/src/stage/id.rs b/crates/primitives/src/stage/id.rs index d4926fea1..2779c2608 100644 --- a/crates/primitives/src/stage/id.rs +++ b/crates/primitives/src/stage/id.rs @@ -53,6 +53,17 @@ impl StageId { StageId::Finish, ]; + /// Stages that require state. 
+ pub const STATE_REQUIRED: [StageId; 7] = [ + StageId::Execution, + StageId::MerkleUnwind, + StageId::AccountHashing, + StageId::StorageHashing, + StageId::MerkleExecute, + StageId::IndexStorageHistory, + StageId::IndexAccountHistory, + ]; + /// Return stage id formatted as string. pub fn as_str(&self) -> &str { match self { From bcb0bff382c0fb4ce0b7761a974b9d5998a2c5a2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 May 2024 17:28:42 +0200 Subject: [PATCH 220/250] chore: rm redundant optimism feature (#8136) --- crates/consensus/common/Cargo.toml | 3 --- crates/node-core/Cargo.toml | 1 - crates/revm/Cargo.toml | 1 - 3 files changed, 5 deletions(-) diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 5e5a6ef57..af93788ee 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -21,6 +21,3 @@ reth-consensus.workspace=true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } mockall = "0.12" - -[features] -optimism = ["reth-primitives/optimism"] diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index e19b4d242..ef5d63b3f 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -107,7 +107,6 @@ optimism = [ "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-consensus-common/optimism", "reth-beacon-consensus/optimism", ] diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 151d53a97..ca52c7c90 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -37,7 +37,6 @@ optimism = [ "revm/optimism", "reth-primitives/optimism", "reth-provider/optimism", - "reth-consensus-common/optimism", "reth-interfaces/optimism", ] js-tracer = ["revm-inspectors/js-tracer"] From 05e434eae3c60208118ddc2e27d6b00b3d6e851e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 May 2024 17:28:53 +0200 Subject: [PATCH 
221/250] feat: rm txmeta associated type (#8138) --- crates/ethereum/evm/src/execute.rs | 12 ++---------- crates/ethereum/evm/src/lib.rs | 9 ++------- crates/evm/src/lib.rs | 17 +++++------------ crates/optimism/evm/src/execute.rs | 18 ++++-------------- crates/optimism/evm/src/lib.rs | 13 +++++-------- crates/revm/src/test_utils.rs | 19 ++++++++----------- examples/custom-evm/src/main.rs | 11 +++-------- 7 files changed, 29 insertions(+), 70 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index db361f35d..c80e476bc 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -10,7 +10,7 @@ use reth_evm::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, }; use reth_interfaces::{ executor::{BlockExecutionError, BlockValidationError}, @@ -62,7 +62,6 @@ impl EthExecutorProvider { impl EthExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { fn eth_executor(&self, db: DB) -> EthBlockExecutor where @@ -79,7 +78,6 @@ where impl BlockExecutorProvider for EthExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { type Executor> = EthBlockExecutor; @@ -117,7 +115,6 @@ struct EthEvmExecutor { impl EthEvmExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { /// Executes the transactions in the block and returns the receipts. /// @@ -158,7 +155,7 @@ where .into()) } - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, ()); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. 
let ResultAndState { result, state } = evm.transact().map_err(move |err| { @@ -238,8 +235,6 @@ impl EthBlockExecutor { impl EthBlockExecutor where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { /// Configures a new evm configuration and block environment for the given block. @@ -353,7 +348,6 @@ where impl Executor for EthBlockExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, DB: Database, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; @@ -403,8 +397,6 @@ impl EthBatchExecutor { impl BatchExecutor for EthBatchExecutor where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 0c8506ff7..7799cf410 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -12,7 +12,7 @@ use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ revm::{config::revm_spec, env::fill_tx_env}, revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, ChainSpec, Head, Header, Transaction, U256, + Address, ChainSpec, Head, Header, TransactionSigned, U256, }; use reth_revm::{Database, EvmBuilder}; pub mod execute; @@ -27,12 +27,7 @@ pub mod dao_fork; pub struct EthEvmConfig; impl ConfigureEvmEnv for EthEvmConfig { - type TxMeta = (); - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, _meta: ()) - where - T: AsRef, - { + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { fill_tx_env(tx_env, transaction, sender) } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index c69e33d65..94cac8bcc 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -8,7 +8,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use 
reth_primitives::{revm::env::fill_block_env, Address, ChainSpec, Header, Transaction, U256}; +use reth_primitives::{ + revm::env::fill_block_env, Address, ChainSpec, Header, TransactionSigned, U256, +}; use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv}; @@ -92,17 +94,8 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This represents the set of methods used to configure the EVM's environment before block /// execution. pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { - /// The type of the transaction metadata that should be used to fill fields in the transaction - /// environment. - /// - /// On ethereum mainnet, this is `()`, and on optimism these are the L1 fee fields and - /// additional L1 block info. - type TxMeta; - - /// Fill transaction environment from a [Transaction] and the given sender address. - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef; + /// Fill transaction environment from a [TransactionSigned] and the given sender address. 
+ fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); /// Fill [CfgEnvWithHandlerCfg] fields according to the chain spec and given header fn fill_cfg_env( diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index c6bb5c7cf..f729ceda1 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -9,15 +9,15 @@ use reth_evm::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, }; use reth_interfaces::{ executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, }; use reth_primitives::{ - BlockNumber, BlockWithSenders, Bytes, ChainSpec, GotExpected, Hardfork, Header, PruneModes, - Receipt, Receipts, TxType, Withdrawals, U256, + BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, + Receipts, TxType, Withdrawals, U256, }; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, @@ -56,7 +56,6 @@ impl OpExecutorProvider { impl OpExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { fn op_executor(&self, db: DB) -> OpBlockExecutor where @@ -73,7 +72,6 @@ where impl BlockExecutorProvider for OpExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { type Executor> = OpBlockExecutor; @@ -110,7 +108,6 @@ struct OpEvmExecutor { impl OpEvmExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { /// Executes the transactions in the block and returns the receipts. 
/// @@ -182,9 +179,7 @@ where .transpose() .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, buf.into()); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. let ResultAndState { result, state } = evm.transact().map_err(move |err| { @@ -274,8 +269,6 @@ impl OpBlockExecutor { impl OpBlockExecutor where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { /// Configures a new evm configuration and block environment for the given block. @@ -375,7 +368,6 @@ where impl Executor for OpBlockExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, DB: Database, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; @@ -428,8 +420,6 @@ impl OpBatchExecutor { impl BatchExecutor for OpBatchExecutor where EvmConfig: ConfigureEvm, - // TODO: get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 748eeab7b..31d39fcb6 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -13,7 +13,7 @@ use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ revm::{config::revm_spec, env::fill_op_tx_env}, revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, Bytes, ChainSpec, Head, Header, Transaction, U256, + Address, ChainSpec, Head, Header, TransactionSigned, U256, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; @@ -32,13 +32,10 @@ pub use error::OptimismBlockExecutionError; pub struct OptimismEvmConfig; impl ConfigureEvmEnv for OptimismEvmConfig { - type TxMeta = Bytes; - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, 
meta: Bytes) - where - T: AsRef, - { - fill_op_tx_env(tx_env, transaction, sender, meta); + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + let mut buf = Vec::with_capacity(transaction.length_without_header()); + transaction.encode_enveloped(&mut buf); + fill_op_tx_env(tx_env, transaction, sender, buf.into()); } fn fill_cfg_env( diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 73df4ea4b..48e6e7c4d 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -2,7 +2,7 @@ use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_interfaces::provider::ProviderResult; use reth_primitives::{ keccak256, revm::config::revm_spec, trie::AccountProof, Account, Address, BlockNumber, - Bytecode, Bytes, ChainSpec, Head, Header, StorageKey, Transaction, B256, U256, + Bytecode, Bytes, ChainSpec, Head, Header, StorageKey, TransactionSigned, B256, U256, }; #[cfg(not(feature = "optimism"))] @@ -114,20 +114,17 @@ impl StateProvider for StateProviderTest { pub struct TestEvmConfig; impl ConfigureEvmEnv for TestEvmConfig { - #[cfg(not(feature = "optimism"))] - type TxMeta = (); - #[cfg(feature = "optimism")] - type TxMeta = Bytes; - #[allow(unused_variables)] - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef, - { + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { #[cfg(not(feature = "optimism"))] fill_tx_env(tx_env, transaction, sender); + #[cfg(feature = "optimism")] - fill_op_tx_env(tx_env, transaction, sender, meta); + { + let mut buf = Vec::with_capacity(transaction.length_without_header()); + transaction.encode_enveloped(&mut buf); + fill_op_tx_env(tx_env, transaction, sender, buf.into()); + } } fn fill_cfg_env( diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index d2c016add..9572e38be 100644 --- a/examples/custom-evm/src/main.rs +++ 
b/examples/custom-evm/src/main.rs @@ -20,7 +20,7 @@ use reth::{ use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; -use reth_primitives::{Chain, ChainSpec, Genesis, Header, Transaction}; +use reth_primitives::{Chain, ChainSpec, Genesis, Header, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -61,13 +61,8 @@ impl MyEvmConfig { } impl ConfigureEvmEnv for MyEvmConfig { - type TxMeta = (); - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef, - { - EthEvmConfig::fill_tx_env(tx_env, transaction, sender, meta) + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + EthEvmConfig::fill_tx_env(tx_env, transaction, sender) } fn fill_cfg_env( From a2623e83642fe696b87189209b17b22d88a07a47 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 7 May 2024 16:46:11 +0100 Subject: [PATCH 222/250] fix: ensures that pruning data from static files only happens on calling `commit()` (#8101) --- crates/stages/src/stages/bodies.rs | 10 +- crates/stages/src/stages/execution.rs | 6 +- crates/stages/src/stages/merkle.rs | 1 + .../static-file/src/static_file_producer.rs | 11 +- .../src/providers/database/provider.rs | 5 +- .../src/providers/static_file/writer.rs | 106 +++++++++++++++--- 6 files changed, 114 insertions(+), 25 deletions(-) diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index 5080b9b9e..bce56880a 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -146,8 +146,13 @@ impl Stage for BodyStage { // If static files are ahead, then we didn't reach the database commit in a previous // stage run. 
So, our only solution is to unwind the static files and proceed from the // database expected height. - Ordering::Greater => static_file_producer - .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?, + Ordering::Greater => { + static_file_producer + .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } // If static files are behind, then there was some corruption or loss of files. This // error will trigger an unwind, that will bring the database to the same height as the // static files. @@ -576,6 +581,7 @@ mod tests { let mut static_file_producer = static_file_provider.latest_writer(StaticFileSegment::Transactions).unwrap(); static_file_producer.prune_transactions(1, checkpoint.block_number).unwrap(); + static_file_producer.commit().unwrap(); } // Unwind all of it let unwind_to = 1; diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 0db907211..6d2eb2a5d 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -169,7 +169,11 @@ where let static_file_producer = if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { - Some(prepare_static_file_producer(provider, start_block)?) + let mut producer = prepare_static_file_producer(provider, start_block)?; + // Since there might be a database <-> static file inconsistency (read + // `prepare_static_file_producer` for context), we commit the change straight away. 
+ producer.commit()?; + Some(producer) } else { None }; diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 77fcf2e15..cdf33b40f 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -582,6 +582,7 @@ mod tests { let hash = last_header.hash_slow(); writer.prune_headers(1).unwrap(); + writer.commit().unwrap(); writer.append_header(last_header, U256::ZERO, hash).unwrap(); writer.commit().unwrap(); diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index 0b0720e21..c7a365c9a 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -272,12 +272,13 @@ mod tests { db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); // Unwind headers from static_files and manually insert them into the database, so we're // able to check that static_file_producer works - db.factory - .static_file_provider() + let static_file_provider = db.factory.static_file_provider(); + let mut static_file_writer = static_file_provider .latest_writer(StaticFileSegment::Headers) - .expect("get static file writer for headers") - .prune_headers(blocks.len() as u64) - .expect("prune headers"); + .expect("get static file writer for headers"); + static_file_writer.prune_headers(blocks.len() as u64).unwrap(); + static_file_writer.commit().expect("prune headers"); + let tx = db.factory.db_ref().tx_mut().expect("init tx"); blocks.iter().for_each(|block| { TestStageDB::insert_header(None, &tx, &block.header, U256::ZERO) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 2cae000ce..428645f1a 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1112,7 +1112,10 @@ impl HeaderSyncGapProvider for DatabaseProvider { 
Ordering::Greater => { let mut static_file_producer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - static_file_producer.prune_headers(next_static_file_block_num - next_block)? + static_file_producer.prune_headers(next_static_file_block_num - next_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()? } Ordering::Less => { // There's either missing or corrupted files. diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index d1aa8560f..3a0f2d031 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -30,10 +30,17 @@ pub struct StaticFileProviderRW { /// stored in a [dashmap::DashMap] inside the parent [StaticFileProvider].which is an [Arc]. /// If we were to use an [Arc] here, we would create a reference cycle. reader: Weak, + /// A [`NippyJarWriter`] instance. writer: NippyJarWriter, + /// Path to opened file. data_path: PathBuf, + /// Reusable buffer for encoding appended data. buf: Vec, + /// Metrics. metrics: Option>, + /// On commit, does the instructed pruning: number of lines, and if it applies, the last block + /// it ends at. + prune_on_commit: Option<(u64, Option)>, } impl StaticFileProviderRW { @@ -45,7 +52,14 @@ impl StaticFileProviderRW { metrics: Option>, ) -> ProviderResult { let (writer, data_path) = Self::open(segment, block, reader.clone(), metrics.clone())?; - Ok(Self { writer, data_path, buf: Vec::with_capacity(100), reader, metrics }) + Ok(Self { + writer, + data_path, + buf: Vec::with_capacity(100), + reader, + metrics, + prune_on_commit: None, + }) } fn open( @@ -100,6 +114,18 @@ impl StaticFileProviderRW { pub fn commit(&mut self) -> ProviderResult<()> { let start = Instant::now(); + // Truncates the data file if instructed to. 
+ if let Some((to_delete, last_block_number)) = self.prune_on_commit.take() { + match self.writer.user_header().segment() { + StaticFileSegment::Headers => self.prune_header_data(to_delete)?, + StaticFileSegment::Transactions => self + .prune_transaction_data(to_delete, last_block_number.expect("should exist"))?, + StaticFileSegment::Receipts => { + self.prune_receipt_data(to_delete, last_block_number.expect("should exist"))? + } + } + } + // Commits offsets and new user_header to disk self.writer.commit().map_err(|e| ProviderError::NippyJar(e.to_string()))?; @@ -372,6 +398,7 @@ impl StaticFileProviderRW { hash: BlockHash, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); @@ -404,6 +431,7 @@ impl StaticFileProviderRW { tx: TransactionSignedNoHash, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; let result = self.append_with_tx_number(StaticFileSegment::Transactions, tx_num, tx)?; @@ -430,6 +458,7 @@ impl StaticFileProviderRW { receipt: Receipt, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; let result = self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt)?; @@ -444,13 +473,64 @@ impl StaticFileProviderRW { Ok(result) } - /// Removes the last `number` of transactions from static files. + /// Adds an instruction to prune `to_delete`transactions during commit. /// - /// # Note - /// Commits to the configuration file at the end. + /// Note: `last_block` refers to the block the unwinds ends at. pub fn prune_transactions( &mut self, - number: u64, + to_delete: u64, + last_block: BlockNumber, + ) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Transactions); + self.queue_prune(to_delete, Some(last_block)) + } + + /// Adds an instruction to prune `to_delete` receipts during commit. 
+ /// + /// Note: `last_block` refers to the block the unwinds ends at. + pub fn prune_receipts( + &mut self, + to_delete: u64, + last_block: BlockNumber, + ) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Receipts); + self.queue_prune(to_delete, Some(last_block)) + } + + /// Adds an instruction to prune `to_delete` headers during commit. + pub fn prune_headers(&mut self, to_delete: u64) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Headers); + self.queue_prune(to_delete, None) + } + + /// Adds an instruction to prune `to_delete` elements during commit. + /// + /// Note: `last_block` refers to the block the unwinds ends at if dealing with transaction-based + /// data. + fn queue_prune( + &mut self, + to_delete: u64, + last_block: Option, + ) -> ProviderResult<()> { + self.ensure_no_queued_prune()?; + self.prune_on_commit = Some((to_delete, last_block)); + Ok(()) + } + + /// Returns Error if there is a pruning instruction that needs to be applied. + fn ensure_no_queued_prune(&self) -> ProviderResult<()> { + if self.prune_on_commit.is_some() { + return Err(ProviderError::NippyJar( + "Pruning should be comitted before appending or pruning more data".to_string(), + )); + } + Ok(()) + } + + /// Removes the last `to_delete` transactions from the data file. + fn prune_transaction_data( + &mut self, + to_delete: u64, last_block: BlockNumber, ) -> ProviderResult<()> { let start = Instant::now(); @@ -458,7 +538,7 @@ impl StaticFileProviderRW { let segment = StaticFileSegment::Transactions; debug_assert!(self.writer.user_header().segment() == segment); - self.truncate(segment, number, Some(last_block))?; + self.truncate(segment, to_delete, Some(last_block))?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -471,11 +551,8 @@ impl StaticFileProviderRW { Ok(()) } - /// Prunes `to_delete` number of receipts from static_files. 
- /// - /// # Note - /// Commits to the configuration file at the end. - pub fn prune_receipts( + /// Prunes the last `to_delete` receipts from the data file. + fn prune_receipt_data( &mut self, to_delete: u64, last_block: BlockNumber, @@ -498,11 +575,8 @@ impl StaticFileProviderRW { Ok(()) } - /// Prunes `to_delete` number of headers from static_files. - /// - /// # Note - /// Commits to the configuration file at the end. - pub fn prune_headers(&mut self, to_delete: u64) -> ProviderResult<()> { + /// Prunes the last `to_delete` headers from the data file. + fn prune_header_data(&mut self, to_delete: u64) -> ProviderResult<()> { let start = Instant::now(); let segment = StaticFileSegment::Headers; From 00f9acb94eeac76c4d204bb37011007797daa6ea Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 May 2024 18:55:46 +0200 Subject: [PATCH 223/250] chore: remove reth-revm optimism (#8141) --- Cargo.lock | 4 +- bin/reth/Cargo.toml | 1 - crates/blockchain-tree/Cargo.toml | 2 +- crates/consensus/beacon/Cargo.toml | 1 - crates/optimism/evm/Cargo.toml | 1 - crates/optimism/node/Cargo.toml | 1 - crates/payload/optimism/Cargo.toml | 1 - crates/revm/Cargo.toml | 12 +-- crates/revm/src/lib.rs | 1 - crates/revm/src/test_utils.rs | 98 +--------------------- crates/rpc/rpc/Cargo.toml | 4 +- crates/rpc/rpc/src/eth/api/call.rs | 3 +- crates/rpc/rpc/src/eth/api/transactions.rs | 6 +- crates/rpc/rpc/src/eth/error.rs | 2 +- crates/rpc/rpc/src/trace.rs | 10 +-- 15 files changed, 18 insertions(+), 129 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ee45d88a..76ab2dccc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5387,7 +5387,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.60", @@ -7653,13 +7653,11 @@ name = "reth-revm" version = "0.2.0-beta.6" 
dependencies = [ "reth-consensus-common", - "reth-evm", "reth-interfaces", "reth-primitives", "reth-provider", "reth-trie", "revm", - "revm-inspectors", "tracing", ] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 3f5d78834..37b26686f 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -133,7 +133,6 @@ min-trace-logs = ["tracing/release_max_level_trace"] optimism = [ "reth-primitives/optimism", - "reth-revm/optimism", "reth-interfaces/optimism", "reth-rpc/optimism", "reth-provider/optimism", diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 912f593dc..70ce9a290 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -50,4 +50,4 @@ assert_matches.workspace = true [features] test-utils = [] -optimism = ["reth-primitives/optimism", "reth-interfaces/optimism", "reth-provider/optimism", "reth-revm/optimism"] +optimism = ["reth-primitives/optimism", "reth-interfaces/optimism", "reth-provider/optimism"] diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 8fb9d3ec3..7b106b2d3 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -71,6 +71,5 @@ optimism = [ "reth-provider/optimism", "reth-blockchain-tree/optimism", "reth-beacon-consensus-core/optimism", - "reth-revm/optimism", "reth-rpc/optimism" ] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 4e5fd2f19..a1c3a168b 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -33,7 +33,6 @@ reth-revm = { workspace = true, features = ["test-utils"] } optimism = [ "reth-primitives/optimism", "reth-provider/optimism", - "reth-revm/optimism", "reth-interfaces/optimism", "revm-primitives/optimism", ] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index be8791c78..9432ce9ed 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -64,7 +64,6 
@@ optimism = [ "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-rpc/optimism", - "reth-revm/optimism", "reth-evm-optimism/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", diff --git a/crates/payload/optimism/Cargo.toml b/crates/payload/optimism/Cargo.toml index c58d0ecb5..567c02833 100644 --- a/crates/payload/optimism/Cargo.toml +++ b/crates/payload/optimism/Cargo.toml @@ -37,7 +37,6 @@ sha2.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-revm/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-evm-optimism/optimism", diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index ca52c7c90..2b621ed76 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -17,26 +17,16 @@ reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true reth-consensus-common.workspace = true -reth-evm = { workspace = true, optional = true } reth-trie = { workspace = true, optional = true } # revm revm.workspace = true -revm-inspectors.workspace = true # common tracing.workspace = true [dev-dependencies] -reth-evm.workspace = true reth-trie.workspace = true [features] -test-utils = ["dep:reth-trie", "dep:reth-evm"] -optimism = [ - "revm/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-interfaces/optimism", -] -js-tracer = ["revm-inspectors/js-tracer"] +test-utils = ["dep:reth-trie"] diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 7f950afb0..8e5419567 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -22,4 +22,3 @@ pub mod test_utils; // Convenience re-exports. 
pub use revm::{self, *}; -pub use revm_inspectors::*; diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 48e6e7c4d..8c4d1894c 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,31 +1,13 @@ -use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_interfaces::provider::ProviderResult; use reth_primitives::{ - keccak256, revm::config::revm_spec, trie::AccountProof, Account, Address, BlockNumber, - Bytecode, Bytes, ChainSpec, Head, Header, StorageKey, TransactionSigned, B256, U256, + keccak256, trie::AccountProof, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, + B256, U256, }; - -#[cfg(not(feature = "optimism"))] -use reth_primitives::revm::env::fill_tx_env; use reth_provider::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; use reth_trie::updates::TrieUpdates; -use revm::{ - db::BundleState, - primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, -}; +use revm::db::BundleState; use std::collections::HashMap; -#[cfg(feature = "optimism")] -use { - reth_primitives::revm::env::fill_op_tx_env, - revm::{inspector_handle_register, GetInspector}, -}; - -use revm::{ - primitives::{HandlerCfg, SpecId}, - Database, Evm, EvmBuilder, -}; - /// Mock state for testing #[derive(Debug, Default, Clone, Eq, PartialEq)] pub struct StateProviderTest { @@ -107,77 +89,3 @@ impl StateProvider for StateProviderTest { unimplemented!("proof generation is not supported") } } - -/// Test EVM configuration. 
-#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct TestEvmConfig; - -impl ConfigureEvmEnv for TestEvmConfig { - #[allow(unused_variables)] - fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { - #[cfg(not(feature = "optimism"))] - fill_tx_env(tx_env, transaction, sender); - - #[cfg(feature = "optimism")] - { - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - fill_op_tx_env(tx_env, transaction, sender, buf.into()); - } - } - - fn fill_cfg_env( - cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, - header: &Header, - total_difficulty: U256, - ) { - let spec_id = revm_spec( - chain_spec, - Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - hash: Default::default(), - }, - ); - - cfg_env.chain_id = chain_spec.chain().id(); - cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; - - cfg_env.handler_cfg.spec_id = spec_id; - #[cfg(feature = "optimism")] - { - cfg_env.handler_cfg.is_optimism = chain_spec.is_optimism(); - } - } -} - -impl ConfigureEvm for TestEvmConfig { - type DefaultExternalContext<'a> = (); - - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { - #[cfg(feature = "optimism")] - let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }; - #[cfg(not(feature = "optimism"))] - let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST }; - EvmBuilder::default().with_db(db).with_handler_cfg(handler_cfg).build() - } - - #[cfg(feature = "optimism")] - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> - where - DB: Database + 'a, - I: GetInspector, - { - let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }; - EvmBuilder::default() - .with_db(db) - .with_external_context(inspector) - .with_handler_cfg(handler_cfg) - .append_handler_register(inspector_handle_register) - .build() 
- } -} diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 513c7da13..224866be6 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -21,11 +21,11 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true reth-rpc-engine-api.workspace = true -reth-revm = { workspace = true, features = ["js-tracer"] } +reth-revm.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true -revm-inspectors.workspace = true +revm-inspectors = { workspace = true, features = ["js-tracer"] } reth-evm.workspace = true reth-network-types.workspace = true diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 8ef2af2f5..acd5c30e8 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -18,7 +18,7 @@ use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, TxKind, use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; -use reth_revm::{access_list::AccessListInspector, database::StateProviderDatabase}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{ state::StateOverride, AccessListWithGasUsed, Bundle, EthCallResponse, StateContext, TransactionRequest, @@ -31,6 +31,7 @@ use revm::{ }, DatabaseCommit, }; +use revm_inspectors::access_list::AccessListInspector; use tracing::trace; // Gas per transaction not creating a contract. 
diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 75470e1fe..721cef3db 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -24,10 +24,7 @@ use reth_primitives::{ use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, }; -use reth_revm::{ - database::StateProviderDatabase, - tracing::{TracingInspector, TracingInspectorConfig}, -}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{ transaction::{ EIP1559TransactionRequest, EIP2930TransactionRequest, EIP4844TransactionRequest, @@ -47,6 +44,7 @@ use revm::{ }, GetInspector, Inspector, }; +use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::future::Future; use crate::eth::revm_utils::FillableTransaction; diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 203b5bbd7..305536aab 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -5,7 +5,6 @@ use alloy_sol_types::decode_revert_reason; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; use reth_interfaces::RethError; use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes, U256}; -use reth_revm::tracing::{js::JsInspectorError, MuxError}; use reth_rpc_types::{ error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, }; @@ -14,6 +13,7 @@ use reth_transaction_pool::error::{ PoolTransactionError, }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, OutOfGasError}; +use revm_inspectors::tracing::{js::JsInspectorError, MuxError}; use std::time::Duration; /// Result alias diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 5ee089a91..710440914 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -11,10 +11,7 @@ use reth_primitives::{ revm::env::tx_env_with_recovered, BlockId, 
BlockNumberOrTag, Bytes, SealedHeader, B256, U256, }; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; -use reth_revm::{ - database::StateProviderDatabase, - tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, -}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; use reth_rpc_types::{ state::StateOverride, @@ -31,7 +28,10 @@ use revm::{ db::{CacheDB, DatabaseCommit}, primitives::EnvWithHandlerCfg, }; -use revm_inspectors::opcode::OpcodeGasInspector; +use revm_inspectors::{ + opcode::OpcodeGasInspector, + tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, +}; use std::{collections::HashSet, sync::Arc}; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; From e172a8e38a81d43bbace5fe6f9bddf5d0b6d3d94 Mon Sep 17 00:00:00 2001 From: Rupam Dey <117000803+rupam-04@users.noreply.github.com> Date: Tue, 7 May 2024 23:11:03 +0530 Subject: [PATCH 224/250] Move and rename ```BeaconConsensus``` to ```EthBeaconConsensus``` (#8140) Co-authored-by: Matthias Seitz --- Cargo.lock | 20 +++++++++---------- Cargo.toml | 4 ++-- .../src/commands/debug_cmd/build_block.rs | 5 +++-- bin/reth/src/commands/debug_cmd/execution.rs | 5 +++-- bin/reth/src/commands/debug_cmd/merkle.rs | 5 +++-- .../src/commands/debug_cmd/replay_engine.rs | 5 +++-- bin/reth/src/commands/import.rs | 4 ++-- bin/reth/src/commands/import_op.rs | 4 ++-- bin/reth/src/commands/stage/run.rs | 4 ++-- bin/reth/src/commands/stage/unwind.rs | 4 ++-- crates/consensus/beacon/Cargo.toml | 4 ++-- crates/consensus/beacon/src/engine/sync.rs | 6 +++--- .../consensus/beacon/src/engine/test_utils.rs | 6 +++--- crates/consensus/beacon/src/lib.rs | 2 +- .../consensus}/Cargo.toml | 2 +- .../consensus}/src/lib.rs | 8 ++++---- crates/node/builder/src/launch/mod.rs | 4 ++-- 17 files changed, 48 insertions(+), 44 deletions(-) rename crates/{consensus/beacon-core => ethereum/consensus}/Cargo.toml (91%) 
rename crates/{consensus/beacon-core => ethereum/consensus}/src/lib.rs (96%) diff --git a/Cargo.lock b/Cargo.lock index 76ab2dccc..164865acb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6516,13 +6516,13 @@ dependencies = [ "assert_matches", "futures", "metrics", - "reth-beacon-consensus-core", "reth-blockchain-tree", "reth-config", "reth-consensus", "reth-db", "reth-downloaders", "reth-engine-primitives", + "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-evm", "reth-evm-ethereum", @@ -6551,15 +6551,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-beacon-consensus-core" -version = "0.2.0-beta.6" -dependencies = [ - "reth-consensus", - "reth-consensus-common", - "reth-primitives", -] - [[package]] name = "reth-blockchain-tree" version = "0.2.0-beta.6" @@ -6931,6 +6922,15 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "reth-ethereum-consensus" +version = "0.2.0-beta.6" +dependencies = [ + "reth-consensus", + "reth-consensus-common", + "reth-primitives", +] + [[package]] name = "reth-ethereum-engine-primitives" version = "0.2.0-beta.6" diff --git a/Cargo.toml b/Cargo.toml index e6edbe5b2..dca4a1eb8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ members = [ "crates/config/", "crates/consensus/auto-seal/", "crates/consensus/beacon/", - "crates/consensus/beacon-core/", + "crates/ethereum/consensus/", "crates/consensus/common/", "crates/consensus/consensus/", "crates/ethereum-forks/", @@ -208,7 +208,7 @@ reth = { path = "bin/reth" } reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } -reth-beacon-consensus-core = { path = "crates/consensus/beacon-core" } +reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-cli-runner = { path = "crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } diff --git 
a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index dd0bfa092..72cc9e1fa 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -14,7 +14,7 @@ use eyre::Context; use reth_basic_payload_builder::{ BuildArguments, BuildOutcome, Cancelled, PayloadBuilder, PayloadConfig, }; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; @@ -160,7 +160,8 @@ impl Command { data_dir.static_files(), )?; - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); let executor = block_executor!(self.chain.clone()); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 33b07368a..50e93dfbc 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -12,7 +12,7 @@ use crate::{ }; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; @@ -220,7 +220,8 @@ impl Command { debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(provider_factory.clone())?; - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); // Configure and build network let network_secret_path = diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index c42cbdd4d..3d94a3a43 100644 --- 
a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -12,7 +12,7 @@ use crate::{ }; use backon::{ConstantBuilder, Retryable}; use clap::Parser; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; use reth_config::Config; use reth_consensus::Consensus; @@ -156,7 +156,8 @@ impl Command { info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); // build the full block client - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); let block_range_client = FullBlockClient::new(fetch_client, consensus); // get best block number diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index da2e458be..b86e707a8 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -10,7 +10,7 @@ use crate::{ use clap::Parser; use eyre::Context; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; -use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensus, BeaconConsensusEngine}; +use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeaconConsensus}; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; @@ -122,7 +122,8 @@ impl Command { let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); let executor = block_executor!(self.chain.clone()); diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 3496077ae..f73cf3c17 100644 --- 
a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -12,7 +12,7 @@ use crate::{ use clap::Parser; use eyre::Context; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; use reth_db::{database::Database, init_db, tables, transaction::DbTx}; @@ -129,7 +129,7 @@ impl ImportCommand { init_genesis(provider_factory.clone())?; - let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); info!(target: "reth::cli", "Consensus engine initialized"); // open file diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 1c5a74015..3147f9b10 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -10,7 +10,7 @@ use crate::{ version::SHORT_VERSION, }; use clap::Parser; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, tables, transaction::DbTx}; @@ -107,7 +107,7 @@ impl ImportOpCommand { init_genesis(provider_factory.clone())?; - let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); info!(target: "reth::cli", "Consensus engine initialized"); // open file diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 562b7e1b3..59d26fc29 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -14,7 +14,7 @@ use crate::{ version::SHORT_VERSION, }; use clap::Parser; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_db::init_db; @@ -169,7 +169,7 @@ impl Command { let (mut 
exec_stage, mut unwind_stage): (Box>, Option>>) = match self.stage { StageEnum::Bodies => { - let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); let mut config = config; config.peers.trusted_nodes_only = self.network.trusted_only; diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index c6dea1a05..b7998d087 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -1,7 +1,7 @@ //! Unwinding a certain block range use clap::{Parser, Subcommand}; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{Config, PruneConfig}; use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; @@ -165,7 +165,7 @@ impl Command { .await?; let consensus: Arc = - Arc::new(BeaconConsensus::new(provider_factory.chain_spec())); + Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); // building network downloaders using the fetch client let fetch_client = network.fetch_client().await?; diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 7b106b2d3..659ef02c1 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] # reth -reth-beacon-consensus-core.workspace = true +reth-ethereum-consensus.workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-stages-api.workspace = true @@ -70,6 +70,6 @@ optimism = [ "reth-interfaces/optimism", "reth-provider/optimism", "reth-blockchain-tree/optimism", - "reth-beacon-consensus-core/optimism", + "reth-ethereum-consensus/optimism", "reth-rpc/optimism" ] diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 9e206176a..fd78f461a 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ 
b/crates/consensus/beacon/src/engine/sync.rs @@ -1,8 +1,8 @@ //! Sync management for the engine implementation. use crate::{ - engine::metrics::EngineSyncMetrics, BeaconConsensus, BeaconConsensusEngineEvent, - ConsensusEngineLiveSyncProgress, + engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, + ConsensusEngineLiveSyncProgress, EthBeaconConsensus, }; use futures::FutureExt; use reth_db::database::Database; @@ -81,7 +81,7 @@ where Self { full_block_client: FullBlockClient::new( client, - Arc::new(BeaconConsensus::new(chain_spec)), + Arc::new(EthBeaconConsensus::new(chain_spec)), ), pipeline_task_spawner, pipeline_state: PipelineState::Idle(Some(pipeline)), diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 27fc6b44c..6cad1b471 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -1,7 +1,7 @@ use crate::{ - engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensus, BeaconConsensusEngine, + engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, MIN_BLOCKS_FOR_PIPELINE_RUN, + BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, @@ -322,7 +322,7 @@ where let consensus: Arc = match self.base_config.consensus { TestConsensusConfig::Real => { - Arc::new(BeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) } TestConsensusConfig::Test => Arc::new(TestConsensus::default()), }; diff --git a/crates/consensus/beacon/src/lib.rs b/crates/consensus/beacon/src/lib.rs index 5a9e1da4a..f62a75f94 100644 --- a/crates/consensus/beacon/src/lib.rs +++ 
b/crates/consensus/beacon/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use reth_beacon_consensus_core::BeaconConsensus; +pub use reth_ethereum_consensus::EthBeaconConsensus; mod engine; pub use engine::*; diff --git a/crates/consensus/beacon-core/Cargo.toml b/crates/ethereum/consensus/Cargo.toml similarity index 91% rename from crates/consensus/beacon-core/Cargo.toml rename to crates/ethereum/consensus/Cargo.toml index b5c778b05..f3ff5d4d3 100644 --- a/crates/consensus/beacon-core/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "reth-beacon-consensus-core" +name = "reth-ethereum-consensus" version.workspace = true edition.workspace = true rust-version.workspace = true diff --git a/crates/consensus/beacon-core/src/lib.rs b/crates/ethereum/consensus/src/lib.rs similarity index 96% rename from crates/consensus/beacon-core/src/lib.rs rename to crates/ethereum/consensus/src/lib.rs index 6ced95dbc..ed283f026 100644 --- a/crates/consensus/beacon-core/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -19,19 +19,19 @@ use std::{sync::Arc, time::SystemTime}; /// /// This consensus engine does basic checks as outlined in the execution specs. 
#[derive(Debug)] -pub struct BeaconConsensus { +pub struct EthBeaconConsensus { /// Configuration chain_spec: Arc, } -impl BeaconConsensus { - /// Create a new instance of [BeaconConsensus] +impl EthBeaconConsensus { + /// Create a new instance of [EthBeaconConsensus] pub fn new(chain_spec: Arc) -> Self { Self { chain_spec } } } -impl Consensus for BeaconConsensus { +impl Consensus for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validation::validate_header_standalone(header, &self.chain_spec)?; Ok(()) diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index e8c5b2967..221434758 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -11,7 +11,7 @@ use futures::{future, future::Either, stream, stream_select, StreamExt}; use reth_auto_seal_consensus::AutoSealConsensus; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensus, BeaconConsensusEngine, + BeaconConsensusEngine, EthBeaconConsensus, }; use reth_blockchain_tree::{ noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, @@ -116,7 +116,7 @@ where let consensus: Arc = if ctx.is_dev() { Arc::new(AutoSealConsensus::new(ctx.chain_spec())) } else { - Arc::new(BeaconConsensus::new(ctx.chain_spec())) + Arc::new(EthBeaconConsensus::new(ctx.chain_spec())) }; debug!(target: "reth::cli", "Spawning stages metrics listener task"); From 1188898dad00ab657bce829160dc4f8520478581 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 May 2024 20:05:56 +0200 Subject: [PATCH 225/250] fix(net): max inflight requests (#8139) --- .../net/network/src/transactions/fetcher.rs | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index f26b1abe2..7c60b5497 100644 --- 
a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -1294,27 +1294,26 @@ pub enum VerificationOutcome { /// Tracks stats about the [`TransactionFetcher`]. #[derive(Debug)] pub struct TransactionFetcherInfo { - /// Currently active outgoing [`GetPooledTransactions`] requests. + /// Max inflight [`GetPooledTransactions`] requests. pub max_inflight_requests: usize, - /// Soft limit for the byte size of the expected - /// [`PooledTransactions`] response on packing a - /// [`GetPooledTransactions`] request with hashes. - pub(super) soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, - /// Soft limit for the byte size of a [`PooledTransactions`] - /// response on assembling a [`GetPooledTransactions`] - /// request. Spec'd at 2 MiB. + /// Soft limit for the byte size of the expected [`PooledTransactions`] response, upon packing + /// a [`GetPooledTransactions`] request with hashes (by default less than 2 MiB worth of + /// transactions is requested). + pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, + /// Soft limit for the byte size of a [`PooledTransactions`] response, upon assembling the + /// response. Spec'd at 2 MiB, but can be adjusted for research purpose. 
pub soft_limit_byte_size_pooled_transactions_response: usize, } impl TransactionFetcherInfo { /// Creates a new max pub fn new( - max_inflight_transaction_requests: usize, + max_inflight_requests: usize, soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, soft_limit_byte_size_pooled_transactions_response: usize, ) -> Self { Self { - max_inflight_requests: max_inflight_transaction_requests, + max_inflight_requests, soft_limit_byte_size_pooled_transactions_response_on_pack_request, soft_limit_byte_size_pooled_transactions_response, } @@ -1324,7 +1323,7 @@ impl TransactionFetcherInfo { impl Default for TransactionFetcherInfo { fn default() -> Self { Self::new( - DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES, + DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS as usize * DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER as usize, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE ) From 7c4d37b27057105ff196e90558d9083405585eca Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 May 2024 20:08:10 +0200 Subject: [PATCH 226/250] perf(net): constraint algorithm fill request with hashes (#8142) --- .../net/network/src/transactions/constants.rs | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/crates/net/network/src/transactions/constants.rs b/crates/net/network/src/transactions/constants.rs index 9e37f0786..107d9758b 100644 --- a/crates/net/network/src/transactions/constants.rs +++ b/crates/net/network/src/transactions/constants.rs @@ -153,19 +153,20 @@ pub mod tx_fetcher { /// search is budget constrained. /// /// Default is a sixth of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes - /// (the breadth of the search), divided by [`DEFAULT_MAX_COUNT_FALLBACK_PEERS`], which - /// defaults to 3 peers (the depth of the search), so the 711 lru hashes in the pending hashes - /// cache. 
+ /// (the ideal max number of hashes pending fetch), divided by + /// [`DEFAULT_MAX_COUNT_FALLBACK_PEERS`], which defaults to 3 peers (the depth of the search), + /// so a search breadth of 711 lru hashes in the pending hashes cache. pub const DEFAULT_BUDGET_FIND_IDLE_FALLBACK_PEER: usize = DEFAULT_MAX_COUNT_PENDING_FETCH / 6 / DEFAULT_MAX_COUNT_FALLBACK_PEERS as usize; /// Default budget for finding hashes in the intersection of transactions announced by a peer /// and in the cache of hashes pending fetch, when said search is budget constrained. /// - /// Default is a sixth of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes - /// (the breadth of the search), so 2133 lru hashes in the pending hashes cache. + /// Default is an eight of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes + /// (the ideal max number of hashes pending fetch), so a search breadth of 1 600 lru hashes in + /// the pending hashes cache. pub const DEFAULT_BUDGET_FIND_INTERSECTION_ANNOUNCED_BY_PEER_AND_PENDING_FETCH: usize = - DEFAULT_MAX_COUNT_PENDING_FETCH / 6; + DEFAULT_MAX_COUNT_PENDING_FETCH / 8; /* ====== SCALARS FOR USE ON FETCH PENDING HASHES ====== */ @@ -209,8 +210,8 @@ pub mod tx_fetcher { /// for the intersection of hashes announced by a peer and hashes pending fetch. The max /// inflight requests is configured in [`TransactionFetcherInfo`]. /// - /// Default is 2 requests. - pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION: usize = 2; + /// Default is 3 requests. + pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION: usize = 3; // Default divisor to the max pending pool imports when calculating search breadth of the /// search for any idle peer to which to send a request filled with hashes pending fetch. @@ -225,8 +226,8 @@ pub mod tx_fetcher { /// The max pending pool imports is configured in /// [`PendingPoolImportsInfo`](crate::transactions::PendingPoolImportsInfo). 
/// - /// Default is 3 requests. - pub const DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_INTERSECTION: usize = 3; + /// Default is 4 requests. + pub const DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_INTERSECTION: usize = 4; /* ================== ROUGH MEASURES ================== */ From 9bd74fda9e347eb5d343e8ce9d234a4c8b976072 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 7 May 2024 21:16:04 +0100 Subject: [PATCH 227/250] fix: use `--syncmode=execution-layer` from `op-node` for optimistic pipeline sync (#7552) Co-authored-by: Matthias Seitz --- bin/reth/src/optimism.rs | 32 +------ crates/blockchain-tree/src/block_buffer.rs | 6 +- crates/blockchain-tree/src/blockchain_tree.rs | 59 +++++++++++- crates/blockchain-tree/src/noop.rs | 6 ++ crates/blockchain-tree/src/shareable.rs | 9 ++ .../beacon/src/engine/hooks/controller.rs | 12 ++- crates/consensus/beacon/src/engine/mod.rs | 85 +++++++++++++----- crates/consensus/beacon/src/engine/sync.rs | 18 ++-- crates/e2e-test-utils/src/engine_api.rs | 2 +- crates/e2e-test-utils/src/node.rs | 22 +++-- crates/e2e-test-utils/src/wallet.rs | 5 +- .../interfaces/src/blockchain_tree/error.rs | 15 +++- crates/interfaces/src/blockchain_tree/mod.rs | 7 ++ crates/optimism/node/tests/e2e/p2p.rs | 90 ++++++++++++++----- crates/optimism/node/tests/e2e/utils.rs | 29 ++++-- crates/primitives/src/stage/mod.rs | 44 +++++++++ crates/rpc/rpc/src/eth/error.rs | 7 +- crates/stages-api/src/pipeline/mod.rs | 24 +++-- crates/storage/provider/src/providers/mod.rs | 4 + 19 files changed, 365 insertions(+), 111 deletions(-) diff --git a/bin/reth/src/optimism.rs b/bin/reth/src/optimism.rs index a651314b8..581718797 100644 --- a/bin/reth/src/optimism.rs +++ b/bin/reth/src/optimism.rs @@ -2,11 +2,7 @@ use clap::Parser; use reth::cli::Cli; -use reth_node_builder::NodeHandle; -use reth_node_optimism::{ - args::RollupArgs, rpc::SequencerClient, OptimismEngineTypes, OptimismNode, -}; -use 
reth_provider::BlockReaderIdExt; +use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; use std::sync::Arc; // We use jemalloc for performance reasons @@ -27,7 +23,7 @@ fn main() { } if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { - let NodeHandle { node, node_exit_future } = builder + let handle = builder .node(OptimismNode::new(rollup_args.clone())) .extend_rpc_modules(move |ctx| { // register sequencer tx forwarder @@ -42,29 +38,7 @@ fn main() { .launch() .await?; - // If `enable_genesis_walkback` is set to true, the rollup client will need to - // perform the derivation pipeline from genesis, validating the data dir. - // When set to false, set the finalized, safe, and unsafe head block hashes - // on the rollup client using a fork choice update. This prevents the rollup - // client from performing the derivation pipeline from genesis, and instead - // starts syncing from the current tip in the DB. - if node.chain_spec().is_optimism() && !rollup_args.enable_genesis_walkback { - let client = node.rpc_server_handles.auth.http_client(); - if let Ok(Some(head)) = node.provider.latest_header() { - reth_rpc_api::EngineApiClient::::fork_choice_updated_v2( - &client, - reth_rpc_types::engine::ForkchoiceState { - head_block_hash: head.hash(), - safe_block_hash: head.hash(), - finalized_block_hash: head.hash(), - }, - None, - ) - .await?; - } - } - - node_exit_future.await + handle.node_exit_future.await }) { eprintln!("Error: {err:?}"); std::process::exit(1); diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 23c6ca681..14e896337 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -104,13 +104,13 @@ impl BlockBuffer { removed } - /// Discard all blocks that precede finalized block number from the buffer. 
- pub fn remove_old_blocks(&mut self, finalized_number: BlockNumber) { + /// Discard all blocks that precede block number from the buffer. + pub fn remove_old_blocks(&mut self, block_number: BlockNumber) { let mut block_hashes_to_remove = Vec::new(); // discard all blocks that are before the finalized number. while let Some(entry) = self.earliest_blocks.first_entry() { - if *entry.key() > finalized_number { + if *entry.key() > block_number { break } let block_hashes = entry.remove(); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 64d311549..689994471 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -19,13 +19,14 @@ use reth_interfaces::{ }; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, PruneModes, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, U256, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, }; use reth_provider::{ chain::{ChainSplit, ChainSplitTarget}, BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, ChainSpecProvider, DisplayBlocksChain, HeaderProvider, ProviderError, + StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use std::{ @@ -783,6 +784,11 @@ where Ok(InsertPayloadOk::Inserted(status)) } + /// Discard all blocks that precede block number from the buffer. + pub fn remove_old_blocks(&mut self, block: BlockNumber) { + self.state.buffered_blocks.remove_old_blocks(block); + } + /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. pub fn finalize_block(&mut self, finalized_block: BlockNumber) { // remove blocks @@ -797,7 +803,7 @@ where } } // clean block buffer. 
- self.state.buffered_blocks.remove_old_blocks(finalized_block); + self.remove_old_blocks(finalized_block); } /// Reads the last `N` canonical hashes from the database and updates the block indices of the @@ -817,6 +823,16 @@ where ) -> RethResult<()> { self.finalize_block(last_finalized_block); + let last_canonical_hashes = self.update_block_hashes()?; + + self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; + + Ok(()) + } + + /// Update all block hashes. iterate over present and new list of canonical hashes and compare + /// them. Remove all mismatches, disconnect them and removes all chains. + pub fn update_block_hashes(&mut self) -> RethResult> { let last_canonical_hashes = self .externals .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; @@ -831,9 +847,22 @@ where } } - self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; + Ok(last_canonical_hashes) + } - Ok(()) + /// Update all block hashes. iterate over present and new list of canonical hashes and compare + /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered + /// blocks before the tip. + pub fn update_block_hashes_and_clear_buffered( + &mut self, + ) -> RethResult> { + let chain = self.update_block_hashes()?; + + if let Some((block, _)) = chain.last_key_value() { + self.remove_old_blocks(*block); + } + + Ok(chain) } /// Reads the last `N` canonical hashes from the database and updates the block indices of the @@ -1220,6 +1249,28 @@ where &self, revert_until: BlockNumber, ) -> Result, CanonicalError> { + // This should only happen when an optimistic sync target was re-orged. + // + // Static files generally contain finalized data. The blockchain tree only deals + // with unfinalized data. The only scenario where canonical reverts go past the highest + // static file is when an optimistic sync occured and unfinalized data was written to + // static files. 
+ if self + .externals + .provider_factory + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Headers) + .unwrap_or_default() > + revert_until + { + trace!( + target: "blockchain_tree", + "Reverting optimistic canonical chain to block {}", + revert_until + ); + return Err(CanonicalError::OptimisticTargetRevert(revert_until)) + } + // read data that is needed for new sidechain let provider_rw = self.externals.provider_factory.provider_rw()?; diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index bb99f9b55..776a15325 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -68,6 +68,12 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn make_canonical(&self, block_hash: BlockHash) -> Result { Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) } + + fn update_block_hashes_and_clear_buffered( + &self, + ) -> RethResult> { + Ok(BTreeMap::new()) + } } impl BlockchainTreeViewer for NoopBlockchainTree { diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 061b49f4c..77cc53c2d 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -83,6 +83,15 @@ where res } + fn update_block_hashes_and_clear_buffered( + &self, + ) -> RethResult> { + let mut tree = self.tree.write(); + let res = tree.update_block_hashes_and_clear_buffered(); + tree.update_chains_metrics(); + res + } + fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> { trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes"); let mut tree = self.tree.write(); diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index 47085be00..7916928db 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs @@ -130,10 +130,16 @@ 
impl EngineHooksController { args: EngineHookContext, db_write_active: bool, ) -> Poll> { - // Hook with DB write access level is not allowed to run due to already running hook with DB - // write access level or active DB write according to passed argument + // Hook with DB write access level is not allowed to run due to any of the following + // reasons: + // - An already running hook with DB write access level + // - Active DB write according to passed argument + // - Missing a finalized block number. We might be on an optimistic sync scenario where we + // cannot skip the FCU with the finalized hash, otherwise CL might misbehave. if hook.db_access_level().is_read_write() && - (self.active_db_write_hook.is_some() || db_write_active) + (self.active_db_write_hook.is_some() || + db_write_active || + args.finalized_block_number.is_none()) { return Poll::Pending } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 4e3550cd3..1057457c7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -15,8 +15,9 @@ use reth_interfaces::{ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ - constants::EPOCH_SLOTS, stage::StageId, BlockNumHash, BlockNumber, Head, Header, SealedBlock, - SealedHeader, B256, + constants::EPOCH_SLOTS, + stage::{PipelineTarget, StageId}, + BlockNumHash, BlockNumber, Head, Header, SealedBlock, SealedHeader, B256, }; use reth_provider::{ BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, @@ -316,7 +317,7 @@ where }; if let Some(target) = maybe_pipeline_target { - this.sync.set_pipeline_sync_target(target); + this.sync.set_pipeline_sync_target(target.into()); } Ok((this, handle)) @@ -668,6 +669,21 @@ where // threshold return Some(state.finalized_block_hash) } + + // OPTIMISTIC SYNCING + // + // It can happen when the node is doing an + 
// optimistic sync, where the CL has no knowledge of the finalized hash, + // but is expecting the EL to sync as high + // as possible before finalizing. + // + // This usually doesn't happen on ETH mainnet since CLs use the more + // secure checkpoint syncing. + // + // However, optimism chains will do this. The risk of a reorg is however + // low. + debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic pipeline target."); + return Some(state.head_block_hash) } Ok(Some(_)) => { // we're fully synced to the finalized block @@ -981,6 +997,10 @@ where // so we should not warn the user, since this will result in us attempting to sync // to a new target and is considered normal operation during sync } + CanonicalError::OptimisticTargetRevert(block_number) => { + self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(*block_number)); + return PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } _ => { warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); // TODO(mattsse) better error handling before attempting to sync (FCU could be @@ -1011,7 +1031,7 @@ where if self.pipeline_run_threshold == 0 { // use the pipeline to sync to the target trace!(target: "consensus::engine", %target, "Triggering pipeline run to sync missing ancestors of the new head"); - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); } else { // trigger a full block download for missing hash, or the parent of its lowest buffered // ancestor @@ -1361,7 +1381,7 @@ where ) { // we don't have the block yet and the distance exceeds the allowed // threshold - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); // we can exit early here because the pipeline will take care of syncing return } @@ -1445,6 +1465,8 @@ where // TODO: do not ignore this let _ = self.blockchain.make_canonical(*target_hash.as_ref()); } + } else if let 
Some(block_number) = err.optimistic_revert_block_number() { + self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(block_number)); } Err((target.head_block_hash, err)) @@ -1506,13 +1528,7 @@ where // update the canon chain if continuous is enabled if self.sync.run_pipeline_continuously() { - let max_block = ctrl.block_number().unwrap_or_default(); - let max_header = self.blockchain.sealed_header(max_block) - .inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); - })? - .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; - self.blockchain.set_canonical_head(max_header); + self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; } let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { @@ -1525,6 +1541,14 @@ where } }; + if sync_target_state.finalized_block_hash.is_zero() { + self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; + self.blockchain.update_block_hashes_and_clear_buffered()?; + self.blockchain.connect_buffered_blocks_to_canonical_hashes()?; + // We are on an optimistic syncing process, better to wait for the next FCU to handle + return Ok(()) + } + // Next, we check if we need to schedule another pipeline run or transition // to live sync via tree. // This can arise if we buffer the forkchoice head, and if the head is an @@ -1580,7 +1604,7 @@ where // the tree update from executing too many blocks and blocking. if let Some(target) = pipeline_target { // run the pipeline to the target since the distance is sufficient - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); } else if let Some(number) = self.blockchain.block_number(sync_target_state.finalized_block_hash)? { @@ -1592,12 +1616,23 @@ where } else { // We don't have the finalized block in the database, so we need to // trigger another pipeline run. 
- self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash); + self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash.into()); } Ok(()) } + fn set_canonical_head(&self, max_block: BlockNumber) -> RethResult<()> { + let max_header = self.blockchain.sealed_header(max_block) + .inspect_err(|error| { + error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); + })? + .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; + self.blockchain.set_canonical_head(max_header); + + Ok(()) + } + fn on_hook_result(&self, polled_hook: PolledHook) -> Result<(), BeaconConsensusEngineError> { if let EngineHookEvent::Finished(Err(error)) = &polled_hook.event { error!( @@ -1746,16 +1781,20 @@ where Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); let _ = tx.send(response); return Err(RethError::Canonical(error)) + } else if error.optimistic_revert_block_number().is_some() { + // engine already set the pipeline unwind target on + // `try_make_sync_target_canonical` + PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } else { + // If we could not make the sync target block canonical, + // we should return the error as an invalid payload status. + PayloadStatus::new( + PayloadStatusEnum::Invalid { validation_error: error.to_string() }, + // TODO: return a proper latest valid hash + // See: + self.forkchoice_state_tracker.last_valid_head(), + ) } - - // If we could not make the sync target block canonical, - // we should return the error as an invalid payload status. 
- PayloadStatus::new( - PayloadStatusEnum::Invalid { validation_error: error.to_string() }, - // TODO: return a proper latest valid hash - // See: - self.forkchoice_state_tracker.last_valid_head(), - ) } }; diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index fd78f461a..261b6874f 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -11,7 +11,7 @@ use reth_interfaces::p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, headers::client::HeadersClient, }; -use reth_primitives::{BlockNumber, ChainSpec, SealedBlock, B256}; +use reth_primitives::{stage::PipelineTarget, BlockNumber, ChainSpec, SealedBlock, B256}; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventListeners; @@ -44,7 +44,7 @@ where /// The pipeline is used for large ranges. pipeline_state: PipelineState, /// Pending target block for the pipeline to sync - pending_pipeline_target: Option, + pending_pipeline_target: Option, /// In-flight full block requests in progress. inflight_full_block_requests: Vec>, /// In-flight full block _range_ requests in progress. @@ -216,8 +216,12 @@ where /// Sets a new target to sync the pipeline to. /// /// But ensures the target is not the zero hash. - pub(crate) fn set_pipeline_sync_target(&mut self, target: B256) { - if target.is_zero() { + pub(crate) fn set_pipeline_sync_target(&mut self, target: PipelineTarget) { + if target.sync_target().is_some_and(|target| target.is_zero()) { + trace!( + target: "consensus::engine::sync", + "Pipeline target cannot be zero hash." + ); // precaution to never sync to the zero hash return } @@ -384,7 +388,7 @@ pub(crate) enum EngineSyncEvent { /// Pipeline started syncing /// /// This is none if the pipeline is triggered without a specific target. 
- PipelineStarted(Option), + PipelineStarted(Option), /// Pipeline finished /// /// If this is returned, the pipeline is idle. @@ -590,7 +594,7 @@ mod tests { .build(pipeline, chain_spec); let tip = client.highest_block().expect("there should be blocks here"); - sync_controller.set_pipeline_sync_target(tip.hash()); + sync_controller.set_pipeline_sync_target(tip.hash().into()); let sync_future = poll_fn(|cx| sync_controller.poll(cx)); let next_event = poll!(sync_future); @@ -598,7 +602,7 @@ mod tests { // can assert that the first event here is PipelineStarted because we set the sync target, // and we should get Ready because the pipeline should be spawned immediately assert_matches!(next_event, Poll::Ready(EngineSyncEvent::PipelineStarted(Some(target))) => { - assert_eq!(target, tip.hash()); + assert_eq!(target.sync_target().unwrap(), tip.hash()); }); // the next event should be the pipeline finishing in a good state diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 13b735aea..fefd7d6ff 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -63,7 +63,7 @@ impl EngineApiTestContext { ) .await?; - assert!(submission.status == expected_status); + assert_eq!(submission.status, expected_status); Ok(submission.latest_valid_hash.unwrap_or_default()) } diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 668af6034..0ae20664a 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -5,7 +5,6 @@ use crate::{ use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; - use futures_util::Future; use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, @@ -171,10 +170,7 @@ where if check { if let Some(latest_block) = self.inner.provider.block_by_number(number)? 
{ - if latest_block.hash_slow() != expected_block_hash { - // TODO: only if its awaiting a reorg - continue - } + assert_eq!(latest_block.hash_slow(), expected_block_hash); break } if wait_finish_checkpoint { @@ -185,8 +181,22 @@ where Ok(()) } + pub async fn wait_unwind(&self, number: BlockNumber) -> eyre::Result<()> { + loop { + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + if let Some(checkpoint) = self.inner.provider.get_stage_checkpoint(StageId::Headers)? { + if checkpoint.block_number == number { + break + } + } + } + Ok(()) + } + /// Asserts that a new block has been added to the blockchain - /// and the tx has been included in the block + /// and the tx has been included in the block. + /// + /// Does NOT work for pipeline since there's no stream notification! pub async fn assert_new_block( &mut self, tip_tx_hash: B256, diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index d94dec2a0..e841e7cd7 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -4,7 +4,8 @@ use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; /// One of the accounts of the genesis allocations. 
pub struct Wallet { pub inner: LocalWallet, - chain_id: u64, + pub inner_nonce: u64, + pub chain_id: u64, amount: usize, derivation_path: Option, } @@ -13,7 +14,7 @@ impl Wallet { /// Creates a new account from one of the secret/pubkeys of the genesis allocations (test.json) pub fn new(amount: usize) -> Self { let inner = MnemonicBuilder::::default().phrase(TEST_MNEMONIC).build().unwrap(); - Self { inner, chain_id: 1, amount, derivation_path: None } + Self { inner, chain_id: 1, amount, derivation_path: None, inner_nonce: 0 } } /// Sets chain id diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index b805c6ee8..e9cdb8714 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -67,6 +67,9 @@ pub enum CanonicalError { /// Error indicating a transaction failed to commit during execution. #[error("transaction error on commit: {0}")] CanonicalCommit(String), + /// Error indicating that a previous optimistic sync target was re-orged + #[error("transaction error on revert: {0}")] + OptimisticTargetRevert(BlockNumber), } impl CanonicalError { @@ -83,6 +86,15 @@ impl CanonicalError { CanonicalError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. }) ) } + + /// Returns `Some(BlockNumber)` if the underlying error matches + /// [CanonicalError::OptimisticTargetRevert]. + pub fn optimistic_revert_block_number(&self) -> Option { + match self { + CanonicalError::OptimisticTargetRevert(block_number) => Some(*block_number), + _ => None, + } + } } /// Error thrown when inserting a block failed because the block is considered invalid. 
@@ -316,7 +328,8 @@ impl InsertBlockErrorKind { InsertBlockErrorKind::Canonical(err) => match err { CanonicalError::BlockchainTree(_) | CanonicalError::CanonicalCommit(_) | - CanonicalError::CanonicalRevert(_) => false, + CanonicalError::CanonicalRevert(_) | + CanonicalError::OptimisticTargetRevert(_) => false, CanonicalError::Validation(_) => true, CanonicalError::Provider(_) => false, }, diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index d8ad667fc..7d2b50e41 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -78,6 +78,13 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { last_finalized_block: BlockNumber, ) -> RethResult<()>; + /// Update all block hashes. iterate over present and new list of canonical hashes and compare + /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered + /// blocks before the tip. + fn update_block_hashes_and_clear_buffered( + &self, + ) -> RethResult>; + /// Reads the last `N` canonical hashes from the database and updates the block indices of the /// tree by attempting to connect the buffered blocks to canonical hashes. 
/// diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index a38fadf67..9e3741055 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,41 +1,89 @@ use crate::utils::{advance_chain, setup}; -use reth::primitives::BASE_MAINNET; -use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet}; -use reth_primitives::ChainId; +use reth_interfaces::blockchain_tree::error::BlockchainTreeError; +use reth_rpc_types::engine::PayloadStatusEnum; +use std::sync::Arc; +use tokio::sync::Mutex; #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let chain_id: ChainId = BASE_MAINNET.chain.into(); + let (mut nodes, _tasks, wallet) = setup(3).await?; + let wallet = Arc::new(Mutex::new(wallet)); - let (mut nodes, _tasks, _wallet) = setup(2).await?; - - let second_node = nodes.pop().unwrap(); + let third_node = nodes.pop().unwrap(); + let mut second_node = nodes.pop().unwrap(); let mut first_node = nodes.pop().unwrap(); - let tip: usize = 300; + let tip: usize = 90; let tip_index: usize = tip - 1; + let reorg_depth = 2; - let wallet = Wallet::default(); - - // On first node, create a chain up to block number 300a - let canonical_payload_chain = advance_chain(tip, &mut first_node, |nonce: u64| { - let wallet = wallet.inner.clone(); - Box::pin(async move { - TransactionTestContext::optimism_l1_block_info_tx(chain_id, wallet, nonce).await - }) - }) - .await?; + // On first node, create a chain up to block number 90a + let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?; let canonical_chain = canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); - // On second node, sync up to block number 300a + // On second node, sync optimistically up to block number 88a second_node .engine_api - .update_forkchoice(canonical_chain[tip_index], canonical_chain[tip_index]) + 
.update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth]) + .await?; + second_node + .wait_block((tip - reorg_depth) as u64, canonical_chain[tip_index - reorg_depth], true) .await?; - second_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; + + // On third node, sync optimistically up to block number 90a + third_node.engine_api.update_optimistic_forkchoice(canonical_chain[tip_index]).await?; + third_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; + + // On second node, create a side chain: 88a -> 89b -> 90b + wallet.lock().await.inner_nonce -= reorg_depth as u64; + second_node.payload.timestamp = first_node.payload.timestamp - reorg_depth as u64; // TODO: probably want to make it node agnostic + let side_payload_chain = advance_chain(reorg_depth, &mut second_node, wallet.clone()).await?; + let side_chain = side_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); + + // Creates fork chain by submitting 89b payload. + // By returning Valid here, op-node will finally return a finalized hash + let _ = third_node + .engine_api + .submit_payload( + side_payload_chain[0].0.clone(), + side_payload_chain[0].1.clone(), + PayloadStatusEnum::Valid, + Default::default(), + ) + .await; + + // It will issue a pipeline reorg to 88a, and then make 89b canonical AND finalized. + third_node.engine_api.update_forkchoice(side_chain[0], side_chain[0]).await?; + + // Make sure we have the updated block + third_node.wait_unwind((tip - reorg_depth) as u64).await?; + third_node + .wait_block( + side_payload_chain[0].0.block().number, + side_payload_chain[0].0.block().hash(), + true, + ) + .await?; + + // Make sure that trying to submit 89a again will result in an invalid payload status, since 89b + // has been set as finalized. 
+ let _ = third_node + .engine_api + .submit_payload( + canonical_payload_chain[tip_index - reorg_depth + 1].0.clone(), + canonical_payload_chain[tip_index - reorg_depth + 1].1.clone(), + PayloadStatusEnum::Invalid { + validation_error: BlockchainTreeError::PendingBlockIsFinalized { + last_finalized: (tip - reorg_depth) as u64 + 1, + } + .to_string(), + }, + Default::default(), + ) + .await; Ok(()) } diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index e86a7c654..ad19086ae 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,9 +1,10 @@ -use reth::{primitives::Bytes, rpc::types::engine::PayloadAttributes, tasks::TaskManager}; -use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType}; +use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; use reth_node_optimism::{OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_primitives::{Address, ChainSpecBuilder, Genesis, B256, BASE_MAINNET}; -use std::{future::Future, pin::Pin, sync::Arc}; +use std::sync::Arc; +use tokio::sync::Mutex; /// Optimism Node Helper type pub(crate) type OpNode = NodeHelperType; @@ -24,12 +25,30 @@ pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskMa .await } +/// Advance the chain with sequential payloads returning them in the end. 
pub(crate) async fn advance_chain( length: usize, node: &mut OpNode, - tx_generator: impl Fn(u64) -> Pin>>, + wallet: Arc>, ) -> eyre::Result> { - node.advance(length as u64, tx_generator, optimism_payload_attributes).await + node.advance( + length as u64, + |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + wallet.inner_nonce, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }, + optimism_payload_attributes, + ) + .await } /// Helper function to create a new eth payload attributes diff --git a/crates/primitives/src/stage/mod.rs b/crates/primitives/src/stage/mod.rs index ffe52554d..3c7c972bc 100644 --- a/crates/primitives/src/stage/mod.rs +++ b/crates/primitives/src/stage/mod.rs @@ -1,6 +1,7 @@ //! Staged sync primitives. mod id; +use crate::{BlockHash, BlockNumber}; pub use id::StageId; mod checkpoints; @@ -9,3 +10,46 @@ pub use checkpoints::{ HeadersCheckpoint, IndexHistoryCheckpoint, MerkleCheckpoint, StageCheckpoint, StageUnitCheckpoint, StorageHashingCheckpoint, }; + +/// Direction and target block for pipeline operations. +#[derive(Debug, Clone, Copy)] +pub enum PipelineTarget { + /// Target for forward synchronization, indicating a block hash to sync to. + Sync(BlockHash), + /// Target for backward unwinding, indicating a block number to unwind to. + Unwind(BlockNumber), +} + +impl PipelineTarget { + /// Returns the target block hash for forward synchronization, if applicable. + /// + /// # Returns + /// + /// - `Some(BlockHash)`: The target block hash for forward synchronization. + /// - `None`: If the target is for backward unwinding. + pub fn sync_target(self) -> Option { + match self { + PipelineTarget::Sync(hash) => Some(hash), + PipelineTarget::Unwind(_) => None, + } + } + + /// Returns the target block number for backward unwinding, if applicable. 
+ /// + /// # Returns + /// + /// - `Some(BlockNumber)`: The target block number for backward unwinding. + /// - `None`: If the target is for forward synchronization. + pub fn unwind_target(self) -> Option { + match self { + PipelineTarget::Sync(_) => None, + PipelineTarget::Unwind(number) => Some(number), + } + } +} + +impl From for PipelineTarget { + fn from(hash: BlockHash) -> Self { + Self::Sync(hash) + } +} diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 305536aab..df2aef800 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -39,7 +39,12 @@ pub enum EthApiError { UnknownBlockNumber, /// Thrown when querying for `finalized` or `safe` block before the merge transition is /// finalized, - #[error("unknown block")] + /// + /// op-node uses case sensitive string comparison to parse this error: + /// + /// + /// TODO(#8045): Temporary, until a version of is pushed through that doesn't require this to figure out the EL sync status. + #[error("Unknown block")] UnknownSafeOrFinalizedBlock, /// Thrown when an unknown block or transaction index is encountered #[error("unknown block or tx index")] diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 0cbd993c5..199cc41e6 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -7,7 +7,7 @@ use reth_db::database::Database; use reth_interfaces::RethResult; use reth_primitives::{ constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, - stage::{StageCheckpoint, StageId}, + stage::{PipelineTarget, StageCheckpoint, StageId}, static_file::HighestStaticFiles, BlockNumber, B256, }; @@ -130,17 +130,31 @@ where /// Consume the pipeline and run it until it reaches the provided tip, if set. Return the /// pipeline and its result as a future. 
#[track_caller] - pub fn run_as_fut(mut self, tip: Option) -> PipelineFut { + pub fn run_as_fut(mut self, target: Option) -> PipelineFut { // TODO: fix this in a follow up PR. ideally, consensus engine would be responsible for // updating metrics. let _ = self.register_metrics(); // ignore error Box::pin(async move { // NOTE: the tip should only be None if we are in continuous sync mode. - if let Some(tip) = tip { - self.set_tip(tip); + if let Some(target) = target { + match target { + PipelineTarget::Sync(tip) => self.set_tip(tip), + PipelineTarget::Unwind(target) => { + if let Err(err) = self.produce_static_files() { + return (self, Err(err.into())) + } + if let Err(err) = self.unwind(target, None) { + return (self, Err(err)) + } + self.progress.update(target); + + return (self, Ok(ControlFlow::Continue { block_number: target })) + } + } } + let result = self.run_loop().await; - trace!(target: "sync::pipeline", ?tip, ?result, "Pipeline finished"); + trace!(target: "sync::pipeline", ?target, ?result, "Pipeline finished"); (self, result) }) } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index b0f43ba9f..bf94e32cf 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -669,6 +669,10 @@ where self.tree.finalize_block(finalized_block) } + fn update_block_hashes_and_clear_buffered(&self) -> RethResult> { + self.tree.update_block_hashes_and_clear_buffered() + } + fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, last_finalized_block: BlockNumber, From 0ad9c7866b6eef75234a353c1dabcce5fb7cc8a9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 May 2024 22:26:58 +0200 Subject: [PATCH 228/250] feat(op): import receipts (#7914) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- bin/reth/src/cli/mod.rs | 8 +- bin/reth/src/commands/import.rs | 2 +- bin/reth/src/commands/import_op.rs | 6 +- 
bin/reth/src/commands/import_receipts.rs | 165 +++++++++ bin/reth/src/commands/mod.rs | 1 + crates/net/downloaders/src/file_client.rs | 207 ++++++----- .../downloaders/src/file_codec_ovm_receipt.rs | 344 ++++++++++++++++++ crates/net/downloaders/src/lib.rs | 19 + .../downloaders/src/receipt_file_client.rs | 268 ++++++++++++++ crates/primitives/Cargo.toml | 3 - crates/storage/provider/src/lib.rs | 1 + 11 files changed, 930 insertions(+), 94 deletions(-) create mode 100644 bin/reth/src/commands/import_receipts.rs create mode 100644 crates/net/downloaders/src/file_codec_ovm_receipt.rs create mode 100644 crates/net/downloaders/src/receipt_file_client.rs diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 40e1f24be..deece5b62 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -8,7 +8,7 @@ use crate::{ LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, + config_cmd, db, debug_cmd, dump_genesis, import, import_receipts, init_cmd, init_state, node::{self, NoArgs}, p2p, recover, stage, test_vectors, }, @@ -150,6 +150,9 @@ impl Cli { Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::ImportReceipts(command) => { + runner.run_blocking_until_ctrl_c(command.execute()) + } #[cfg(feature = "optimism")] Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), @@ -188,6 +191,9 @@ pub enum Commands { /// This syncs RLP encoded blocks from a file. #[command(name = "import")] Import(import::ImportCommand), + /// This imports RLP encoded receipts from a file. 
+ #[command(name = "import-receipts")] + ImportReceipts(import_receipts::ImportReceiptsCommand), /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. #[cfg(feature = "optimism")] #[command(name = "import-op")] diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index f73cf3c17..354787f32 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -138,7 +138,7 @@ impl ImportCommand { let mut total_decoded_blocks = 0; let mut total_decoded_txns = 0; - while let Some(file_client) = reader.next_chunk().await? { + while let Some(file_client) = reader.next_chunk::().await? { // create a new FileClient from chunk read from file info!(target: "reth::cli", "Importing chain file chunk" diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 3147f9b10..8ca1baf5b 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -14,7 +14,9 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, tables, transaction::DbTx}; -use reth_downloaders::file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}; +use reth_downloaders::file_client::{ + ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, +}; use reth_node_core::init::init_genesis; @@ -117,7 +119,7 @@ impl ImportOpCommand { let mut total_decoded_txns = 0; let mut total_filtered_out_dup_txns = 0; - while let Some(mut file_client) = reader.next_chunk().await? { + while let Some(mut file_client) = reader.next_chunk::().await? { // create a new FileClient from chunk read from file info!(target: "reth::cli", "Importing chain file chunk" diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs new file mode 100644 index 000000000..8e06c3c03 --- /dev/null +++ b/bin/reth/src/commands/import_receipts.rs @@ -0,0 +1,165 @@ +//! 
Command that imports receipts from a file. + +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, +}; +use clap::Parser; +use reth_db::{database::Database, init_db, transaction::DbTx, DatabaseEnv}; +use reth_downloaders::{ + file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, + receipt_file_client::ReceiptFileClient, +}; +use reth_node_core::version::SHORT_VERSION; +use reth_primitives::{stage::StageId, ChainSpec, StaticFileSegment}; +use reth_provider::{ + BundleStateWithReceipts, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, StaticFileWriter, +}; +use tracing::{debug, error, info}; + +use std::{path::PathBuf, sync::Arc}; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct ImportReceiptsCommand { + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = chain_help(), + default_value = SUPPORTED_CHAINS[0], + value_parser = genesis_value_parser + )] + chain: Arc, + + /// Chunk byte length. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + + #[command(flatten)] + db: DatabaseArgs, + + /// The path to a receipts file for import. 
+ #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl ImportReceiptsCommand { + /// Execute `import` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking receipts import" + ); + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + + let db_path = data_dir.db(); + info!(target: "reth::cli", path = ?db_path, "Opening database"); + + let db = Arc::new(init_db(db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); + let provider_factory = + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; + + let provider = provider_factory.provider_rw()?; + let static_file_provider = provider_factory.static_file_provider(); + + for stage in StageId::ALL { + let checkpoint = provider.get_stage_checkpoint(stage)?; + debug!(target: "reth::cli", + ?stage, + ?checkpoint, + "Read stage checkpoints from db" + ); + } + + // prepare the tx for `write_to_storage` + let tx = provider.into_tx(); + let mut total_decoded_receipts = 0; + + // open file + let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + + while let Some(file_client) = reader.next_chunk::().await? { + // create a new file client from chunk read from file + let ReceiptFileClient { receipts, first_block, total_receipts: total_receipts_chunk } = + file_client; + + // mark these as decoded + total_decoded_receipts += total_receipts_chunk; + + info!(target: "reth::cli", + first_receipts_block=?first_block, + total_receipts_chunk, + "Importing receipt file chunk" + ); + + // We're reusing receipt writing code internal to + // `BundleStateWithReceipts::write_to_storage`, so we just use a default empty + // `BundleState`. 
+ let bundled_state = + BundleStateWithReceipts::new(Default::default(), receipts, first_block); + + let static_file_producer = + static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?; + + // finally, write the receipts + bundled_state.write_to_storage::<::TXMut>( + &tx, + Some(static_file_producer), + OriginalValuesKnown::Yes, + )?; + } + + tx.commit()?; + // as static files works in file ranges, internally it will be committing when creating the + // next file range already, so we only need to call explicitly at the end. + static_file_provider.commit()?; + + if total_decoded_receipts == 0 { + error!(target: "reth::cli", "No receipts were imported, ensure the receipt file is valid and not empty"); + return Ok(()) + } + + // compare the highest static file block to the number of receipts we decoded + // + // `HeaderNumbers` and `TransactionHashNumbers` tables serve as additional indexes, but + // nothing like this needs to exist for Receipts. So `tx.entries::` would + // return zero here. 
+ let total_imported_receipts = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Receipts) + .expect("static files must exist after ensuring we decoded more than zero"); + + if total_imported_receipts != total_decoded_receipts as u64 { + error!(target: "reth::cli", + total_decoded_receipts, + total_imported_receipts, + "Receipts were partially imported" + ); + } + + info!(target: "reth::cli", total_imported_receipts, "Receipt file imported"); + + Ok(()) + } +} diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index a005d5e8b..9e6ff8f84 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -6,6 +6,7 @@ pub mod debug_cmd; pub mod dump_genesis; pub mod import; pub mod import_op; +pub mod import_receipts; pub mod init_cmd; pub mod init_state; diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index ef72a891b..85fac4642 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,4 +1,5 @@ use super::file_codec::BlockFileCodec; +use futures::Future; use itertools::Either; use reth_interfaces::p2p::{ bodies::client::{BodiesClient, BodiesFut}, @@ -12,7 +13,7 @@ use reth_primitives::{ BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BytesMut, Header, HeadersDirection, SealedHeader, B256, }; -use std::{collections::HashMap, path::Path}; +use std::{collections::HashMap, io, path::Path}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; @@ -57,6 +58,16 @@ pub enum FileClientError { /// An error occurred when decoding blocks, headers, or rlp headers from the file. #[error("{0}")] Rlp(alloy_rlp::Error, Vec), + + /// Custom error message. 
+ #[error("{0}")] + Custom(&'static str), +} + +impl From<&'static str> for FileClientError { + fn from(value: &'static str) -> Self { + Self::Custom(value) + } } impl FileClient { @@ -78,82 +89,6 @@ impl FileClient { Ok(Self::from_reader(&reader[..], file_len).await?.0) } - /// Initialize the [`FileClient`] from bytes that have been read from file. - pub(crate) async fn from_reader( - reader: B, - num_bytes: u64, - ) -> Result<(Self, Vec), FileClientError> - where - B: AsyncReadExt + Unpin, - { - let mut headers = HashMap::new(); - let mut hash_to_number = HashMap::new(); - let mut bodies = HashMap::new(); - - // use with_capacity to make sure the internal buffer contains the entire chunk - let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); - - trace!(target: "downloaders::file", - target_num_bytes=num_bytes, - capacity=stream.read_buffer().capacity(), - "init decode stream" - ); - - let mut remaining_bytes = vec![]; - - let mut log_interval = 0; - let mut log_interval_start_block = 0; - - while let Some(block_res) = stream.next().await { - let block = match block_res { - Ok(block) => block, - Err(FileClientError::Rlp(err, bytes)) => { - trace!(target: "downloaders::file", - %err, - bytes_len=bytes.len(), - "partial block returned from decoding chunk" - ); - remaining_bytes = bytes; - break - } - Err(err) => return Err(err), - }; - let block_number = block.header.number; - let block_hash = block.header.hash_slow(); - - // add to the internal maps - headers.insert(block.header.number, block.header.clone()); - hash_to_number.insert(block_hash, block.header.number); - bodies.insert( - block_hash, - BlockBody { - transactions: block.body, - ommers: block.ommers, - withdrawals: block.withdrawals, - }, - ); - - if log_interval == 0 { - trace!(target: "downloaders::file", - block_number, - "read first block" - ); - log_interval_start_block = block_number; - } else if log_interval % 100_000 == 0 { - trace!(target: "downloaders::file", 
- blocks=?log_interval_start_block..=block_number, - "read blocks from file" - ); - log_interval_start_block = block_number + 1; - } - log_interval += 1; - } - - trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client"); - - Ok((Self { headers, hash_to_number, bodies }, remaining_bytes)) - } - /// Get the tip hash of the chain. pub fn tip(&self) -> Option { self.headers.get(&self.max_block()?).map(|h| h.hash_slow()) @@ -241,6 +176,88 @@ impl FileClient { } } +impl FromReader for FileClient { + type Error = FileClientError; + + /// Initialize the [`FileClient`] from bytes that have been read from file. + fn from_reader( + reader: B, + num_bytes: u64, + ) -> impl Future), Self::Error>> + where + B: AsyncReadExt + Unpin, + { + let mut headers = HashMap::new(); + let mut hash_to_number = HashMap::new(); + let mut bodies = HashMap::new(); + + // use with_capacity to make sure the internal buffer contains the entire chunk + let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); + + trace!(target: "downloaders::file", + target_num_bytes=num_bytes, + capacity=stream.read_buffer().capacity(), + "init decode stream" + ); + + let mut remaining_bytes = vec![]; + + let mut log_interval = 0; + let mut log_interval_start_block = 0; + + async move { + while let Some(block_res) = stream.next().await { + let block = match block_res { + Ok(block) => block, + Err(FileClientError::Rlp(err, bytes)) => { + trace!(target: "downloaders::file", + %err, + bytes_len=bytes.len(), + "partial block returned from decoding chunk" + ); + remaining_bytes = bytes; + break + } + Err(err) => return Err(err), + }; + let block_number = block.header.number; + let block_hash = block.header.hash_slow(); + + // add to the internal maps + headers.insert(block.header.number, block.header.clone()); + hash_to_number.insert(block_hash, block.header.number); + bodies.insert( + block_hash, + BlockBody { + transactions: block.body, + ommers: block.ommers, 
+ withdrawals: block.withdrawals, + }, + ); + + if log_interval == 0 { + trace!(target: "downloaders::file", + block_number, + "read first block" + ); + log_interval_start_block = block_number; + } else if log_interval % 100_000 == 0 { + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + "read blocks from file" + ); + log_interval_start_block = block_number + 1; + } + log_interval += 1; + } + + trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client"); + + Ok((Self { headers, hash_to_number, bodies }, remaining_bytes)) + } + } +} + impl HeadersClient for FileClient { type Output = HeadersFut; @@ -341,6 +358,11 @@ pub struct ChunkedFileReader { } impl ChunkedFileReader { + /// Returns the remaining file length. + pub fn file_len(&self) -> u64 { + self.file_byte_len + } + /// Opens the file to import from given path. Returns a new instance. If no chunk byte length /// is passed, chunks have [`DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`] (one static file). pub async fn new>( @@ -377,7 +399,10 @@ impl ChunkedFileReader { } /// Read next chunk from file. Returns [`FileClient`] containing decoded chunk. - pub async fn next_chunk(&mut self) -> Result, FileClientError> { + pub async fn next_chunk(&mut self) -> Result, T::Error> + where + T: FromReader, + { if self.file_byte_len == 0 && self.chunk.is_empty() { // eof return Ok(None) @@ -391,6 +416,7 @@ impl ChunkedFileReader { // read new bytes from file let mut reader = BytesMut::zeroed(new_read_bytes_target_len as usize); + // actual bytes that have been read let new_read_bytes_len = self.file.read_exact(&mut reader).await? 
as u64; @@ -416,14 +442,7 @@ impl ChunkedFileReader { // make new file client from chunk let (file_client, bytes) = - FileClient::from_reader(&self.chunk[..], next_chunk_byte_len as u64).await?; - - debug!(target: "downloaders::file", - headers_len=file_client.headers.len(), - bodies_len=file_client.bodies.len(), - remaining_bytes_len=bytes.len(), - "parsed blocks that were read from file" - ); + T::from_reader(&self.chunk[..], next_chunk_byte_len as u64).await?; // save left over bytes self.chunk = bytes; @@ -432,6 +451,20 @@ impl ChunkedFileReader { } } +/// Constructs a file client from a reader. +pub trait FromReader { + /// Error returned by file client type. + type Error: From; + /// Returns a file client + fn from_reader( + reader: B, + num_bytes: u64, + ) -> impl Future), Self::Error>> + where + Self: Sized, + B: AsyncReadExt + Unpin; +} + #[cfg(test)] mod tests { use super::*; @@ -594,7 +627,7 @@ mod tests { // test - while let Some(client) = reader.next_chunk().await.unwrap() { + while let Some(client) = reader.next_chunk::().await.unwrap() { let sync_target = client.tip_header().unwrap(); let sync_target_hash = sync_target.hash(); diff --git a/crates/net/downloaders/src/file_codec_ovm_receipt.rs b/crates/net/downloaders/src/file_codec_ovm_receipt.rs new file mode 100644 index 000000000..5b3c81a92 --- /dev/null +++ b/crates/net/downloaders/src/file_codec_ovm_receipt.rs @@ -0,0 +1,344 @@ +//! Codec for reading raw receipts from a file. + +use alloy_rlp::{Decodable, RlpDecodable}; +use reth_primitives::{ + bytes::{Buf, BytesMut}, + Address, Bloom, Bytes, Log, Receipt, TxType, B256, +}; +use tokio_util::codec::Decoder; + +use crate::{file_client::FileClientError, receipt_file_client::ReceiptWithBlockNumber}; + +/// Codec for reading raw receipts from a file. +/// +/// If using with [`FramedRead`](tokio_util::codec::FramedRead), the user should make sure the +/// framed reader has capacity for the entire receipts file. 
Otherwise, the decoder will return +/// [`InputTooShort`](alloy_rlp::Error::InputTooShort), because RLP receipts can only be +/// decoded if the internal buffer is large enough to contain the entire receipt. +/// +/// Without ensuring the framed reader has capacity for the entire file, a receipt is likely to +/// fall across two read buffers, the decoder will not be able to decode the receipt, which will +/// cause it to fail. +/// +/// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set +/// the capacity of the framed reader to the size of the file. +#[derive(Debug)] +pub struct HackReceiptFileCodec; + +impl Decoder for HackReceiptFileCodec { + type Item = Option; + type Error = FileClientError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if src.is_empty() { + return Ok(None) + } + + let buf_slice = &mut src.as_ref(); + let receipt = HackReceiptContainer::decode(buf_slice) + .map_err(|err| Self::Error::Rlp(err, src.to_vec()))? + .0; + src.advance(src.len() - buf_slice.len()); + + Ok(Some( + receipt.map(|receipt| receipt.try_into().map_err(FileClientError::from)).transpose()?, + )) + } +} + +/// See +#[derive(Debug, PartialEq, Eq, RlpDecodable)] +pub struct HackReceipt { + tx_type: u8, + post_state: Bytes, + status: u64, + cumulative_gas_used: u64, + bloom: Bloom, + /// + logs: Vec, + tx_hash: B256, + contract_address: Address, + gas_used: u64, + block_hash: B256, + block_number: u64, + transaction_index: u32, + l1_gas_price: u64, + l1_gas_used: u64, + l1_fee: u64, + fee_scalar: String, +} + +#[derive(Debug, PartialEq, Eq, RlpDecodable)] +#[rlp(trailing)] +struct HackReceiptContainer(Option); + +impl TryFrom for ReceiptWithBlockNumber { + type Error = &'static str; + fn try_from(exported_receipt: HackReceipt) -> Result { + let HackReceipt { + tx_type, status, cumulative_gas_used, logs, block_number: number, .. 
+ } = exported_receipt; + + #[allow(clippy::needless_update)] + let receipt = Receipt { + tx_type: TxType::try_from(tx_type.to_be_bytes()[0])?, + success: status != 0, + cumulative_gas_used, + logs, + ..Default::default() + }; + + Ok(Self { receipt, number }) + } +} + +#[cfg(test)] +pub(super) mod test { + use reth_primitives::{alloy_primitives::LogData, hex}; + + use super::*; + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_1: &[u8] = &hex!("f9030ff9030c8080018303183db9010000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000f90197f89b948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff863a00109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2da000000000000000000000000000000000000000000000000000000000618d8837f89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0e3ebf0a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d80f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007edc6ca0bb6834800080a05e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a9400000000000000000000000000000000000000008303183da0bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e8754530180018
212c2821c2383312e35"); + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_2: &[u8] = &hex!("f90271f9026e8080018301c60db9010000080000000200000000000000000008000000000000000000000100008000000000000000000000000000000000000000000000000000000000400000000000100000000000000000000000020000000000000000000000000000000000004000000000000000000000000000000000400000000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000100000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000008400000000000000000010000000000000000020000000020000000000000000000000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0ea0e40a00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b24080f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007eda7867e0c7d4800080a0af6ed8a6864d44989adc47c84f6fe0aeb1819817505c42cde6cbbcd5e14dd3179400000000000000000000000000000000000000008301c60da045fd6ce41bb8ebb2bccdaa92dd1619e287704cb07722039901a7eba63dea1d130280018212c2821c2383312e35"); + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_3: &[u8] = 
&hex!("f90271f9026e8080018301c60db9010000000000000000000000000000000000000000400000000000000000008000000000000000000000000000000000004000000000000000000000400004000000100000000000000000000000000000000000000000000000000000000000004000000000000000000000040000000000400080000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000008100000000000000000000000000000000000004000000000000000000000000008000000000000000000010000000000000000000000000000400000000000000001000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d101e54ba00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a9980f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007ed8842f062774800080a08fab01dcec1da547e90a77597999e9153ff788fa6451d1cc942064427bd995019400000000000000000000000000000000000000008301c60da0da4509fe0ca03202ddbe4f68692c132d689ee098433691040ece18c3a45d44c50380018212c2821c2383312e35"); + + fn hack_receipt_1() -> HackReceipt { + let receipt = receipt_block_1(); + + HackReceipt { + tx_type: receipt.receipt.tx_type as u8, + post_state: Bytes::default(), + status: receipt.receipt.success as u64, + cumulative_gas_used: receipt.receipt.cumulative_gas_used, + bloom: 
Bloom::from(hex!("00000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000")), + logs: receipt.receipt.logs, + tx_hash: B256::from(hex!("5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a")), contract_address: Address::from(hex!("0000000000000000000000000000000000000000")), gas_used: 202813, + block_hash: B256::from(hex!("bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453")), + block_number: receipt.number, + transaction_index: 0, + l1_gas_price: 1, + l1_gas_used: 4802, + l1_fee: 7203, + fee_scalar: String::from("1.5") + } + } + + pub(crate) fn receipt_block_1() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d" + )), + ], + Bytes::from(hex!( + "00000000000000000000000000000000000000000000000000000000618d8837" + )), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d0e3ebf0" + )), + B256::from(hex!( + 
"0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_3 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007edc6ca0bb68348000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 202813, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2, log_3]; + + ReceiptWithBlockNumber { receipt, number: 1 } + } + + pub(crate) fn receipt_block_2() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d0ea0e40" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b240" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007eda7867e0c7d48000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: 
true, + cumulative_gas_used: 116237, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2]; + + ReceiptWithBlockNumber { receipt, number: 2 } + } + + pub(crate) fn receipt_block_3() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d101e54b" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a99" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007ed8842f0627748000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 116237, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2]; + + ReceiptWithBlockNumber { receipt, number: 3 } + } + + #[test] + fn decode_hack_receipt() { + let receipt = hack_receipt_1(); + + let decoded = HackReceiptContainer::decode(&mut &HACK_RECEIPT_ENCODED_BLOCK_1[..]) + .unwrap() + .0 + .unwrap(); + + assert_eq!(receipt, decoded); + } + + #[test] + #[allow(clippy::needless_update)] + fn receipts_codec() { + // rig + + let mut receipt_1_to_3 = 
HACK_RECEIPT_ENCODED_BLOCK_1.to_vec();
+        receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2);
+        receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3);
+
+        let encoded = &mut BytesMut::from(&receipt_1_to_3[..]);
+
+        let mut codec = HackReceiptFileCodec;
+
+        // test
+
+        let first_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
+
+        assert_eq!(receipt_block_1(), first_decoded_receipt);
+
+        let second_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
+
+        assert_eq!(receipt_block_2(), second_decoded_receipt);
+
+        let third_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
+
+        assert_eq!(receipt_block_3(), third_decoded_receipt);
+    }
+}
diff --git a/crates/net/downloaders/src/lib.rs b/crates/net/downloaders/src/lib.rs
index 37c4a95e3..81e669d88 100644
--- a/crates/net/downloaders/src/lib.rs
+++ b/crates/net/downloaders/src/lib.rs
@@ -27,10 +27,29 @@ pub mod metrics;
 /// efficiently buffering headers and bodies for retrieval.
 pub mod file_client;
 
+/// Module managing file-based data retrieval and buffering of receipts.
+///
+/// Contains [ReceiptFileClient](receipt_file_client::ReceiptFileClient) to read receipt data from
+/// files, efficiently buffering receipts for retrieval.
+///
+/// Currently configured to use codec [`HackReceipt`](file_codec_ovm_receipt::HackReceipt) based on
+/// export of below Bedrock data using <https://github.com/testinprod-io/op-geth/pull/1>. Codec can
+/// be replaced with regular encoding of receipts for export.
+///
+/// NOTE: receipts can be exported using regular op-geth encoding for `Receipt` type, to fit
+/// reth's needs for importing. However, this would require patching the diff in <https://github.com/testinprod-io/op-geth/pull/1> to export the `Receipt` and not `HackReceipt` type (originally
+/// made for op-erigon's import needs).
+pub mod receipt_file_client;
+
 /// Module with a codec for reading and encoding block bodies in files.
 ///
 /// Enables decoding and encoding `Block` types within file contexts.
 pub mod file_codec;
 
+/// Module with a codec for reading and encoding receipts in files.
+///
+/// Enables decoding and encoding `HackReceipt` type. See <https://github.com/testinprod-io/op-geth/pull/1>.
+pub mod file_codec_ovm_receipt;
+
 #[cfg(any(test, feature = "test-utils"))]
 pub mod test_utils;
diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs
new file mode 100644
index 000000000..b6291d0a3
--- /dev/null
+++ b/crates/net/downloaders/src/receipt_file_client.rs
@@ -0,0 +1,268 @@
+use futures::Future;
+use reth_primitives::{Receipt, Receipts};
+use tokio::io::AsyncReadExt;
+use tokio_stream::StreamExt;
+use tokio_util::codec::FramedRead;
+use tracing::trace;
+
+use crate::{
+    file_client::{FileClientError, FromReader},
+    file_codec_ovm_receipt::HackReceiptFileCodec,
+};
+
+/// File client for reading RLP encoded receipts from file. Receipts in file must be in sequential
+/// order w.r.t. block number.
+#[derive(Debug)]
+pub struct ReceiptFileClient {
+    /// The buffered receipts, read from file, as nested lists. One list per block number.
+    pub receipts: Receipts,
+    /// First (lowest) block number read from file.
+    pub first_block: u64,
+    /// Total number of receipts. Count of elements in [`Receipts`] flattened.
+    pub total_receipts: usize,
+}
+
+impl FromReader for ReceiptFileClient {
+    type Error = FileClientError;
+
+    /// Initialize the [`ReceiptFileClient`] from bytes that have been read from file. Caution! If
+    /// first block has no transactions, it's assumed to be the genesis block.
+    fn from_reader<B>(
+        reader: B,
+        num_bytes: u64,
+    ) -> impl Future<Output = Result<(Self, Vec<u8>), Self::Error>>
+    where
+        B: AsyncReadExt + Unpin,
+    {
+        let mut receipts = Receipts::new();
+
+        // use with_capacity to make sure the internal buffer contains the entire chunk
+        let mut stream =
+            FramedRead::with_capacity(reader, HackReceiptFileCodec, num_bytes as usize);
+
+        trace!(target: "downloaders::file",
+            target_num_bytes=num_bytes,
+            capacity=stream.read_buffer().capacity(),
+            coded=?HackReceiptFileCodec,
+            "init decode stream"
+        );
+
+        let mut remaining_bytes = vec![];
+
+        let mut log_interval = 0;
+        let mut log_interval_start_block = 0;
+
+        let mut block_number = 0;
+        let mut total_receipts = 0;
+        let mut receipts_for_block = vec![];
+        let mut first_block = None;
+
+        async move {
+            while let Some(receipt_res) = stream.next().await {
+                let receipt = match receipt_res {
+                    Ok(receipt) => receipt,
+                    Err(FileClientError::Rlp(err, bytes)) => {
+                        trace!(target: "downloaders::file",
+                            %err,
+                            bytes_len=bytes.len(),
+                            "partial receipt returned from decoding chunk"
+                        );
+
+                        remaining_bytes = bytes;
+
+                        break
+                    }
+                    Err(err) => return Err(err),
+                };
+
+                total_receipts += 1;
+
+                match receipt {
+                    Some(ReceiptWithBlockNumber { receipt, number }) => {
+                        if first_block.is_none() {
+                            first_block = Some(number);
+                            block_number = number;
+                        }
+
+                        if block_number == number {
+                            receipts_for_block.push(Some(receipt));
+                        } else {
+                            receipts.push(receipts_for_block);
+
+                            // next block
+                            block_number = number;
+                            receipts_for_block = vec![Some(receipt)];
+                        }
+                    }
+                    None => {
+                        match first_block {
+                            Some(num) => {
+                                // if there was a block number before this, push receipts for that
+                                // block
+                                receipts.push(receipts_for_block);
+                                // block with no txns
+                                block_number = num + receipts.len() as u64;
+                            }
+                            None => {
+                                // this is the first block and it's empty, assume it's the genesis
+                                // block
+                                first_block = Some(0);
+                                block_number = 0;
+                            }
+                        }
+
+                        receipts_for_block = vec![];
+                    }
+                }
+
+                if log_interval == 0 {
+                    
trace!(target: "downloaders::file", + block_number, + total_receipts, + "read first receipt" + ); + log_interval_start_block = block_number; + } else if log_interval % 100_000 == 0 { + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + total_receipts, + "read receipts from file" + ); + log_interval_start_block = block_number + 1; + } + log_interval += 1; + } + + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + total_receipts, + "read receipts from file" + ); + + // we need to push the last receipts + receipts.push(receipts_for_block); + + trace!(target: "downloaders::file", + blocks = receipts.len(), + total_receipts, + "Initialized receipt file client" + ); + + Ok(( + Self { receipts, first_block: first_block.unwrap_or_default(), total_receipts }, + remaining_bytes, + )) + } + } +} + +/// [`Receipt`] with block number. +#[derive(Debug, PartialEq, Eq)] +pub struct ReceiptWithBlockNumber { + /// Receipt. + pub receipt: Receipt, + /// Block number. 
+ pub number: u64, +} + +#[cfg(test)] +mod test { + use reth_primitives::hex; + use reth_tracing::init_test_tracing; + + use crate::file_codec_ovm_receipt::test::{ + receipt_block_1 as op_mainnet_receipt_block_1, + receipt_block_2 as op_mainnet_receipt_block_2, + receipt_block_3 as op_mainnet_receipt_block_3, + HACK_RECEIPT_ENCODED_BLOCK_1 as HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET, + HACK_RECEIPT_ENCODED_BLOCK_2 as HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET, + HACK_RECEIPT_ENCODED_BLOCK_3 as HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET, + }; + + use super::*; + + /// No receipts for genesis block + const HACK_RECEIPT_BLOCK_NO_TRANSACTIONS: &[u8] = &hex!("c0"); + + #[tokio::test] + async fn receipt_file_client_ovm_codec() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 and 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + // no receipt for block 4 + encoded_receipts.extend_from_slice(HACK_RECEIPT_BLOCK_NO_TRANSACTIONS); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(4, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, receipts[2][0].clone().unwrap()); + assert!(receipts[3].is_empty()); + } + + #[tokio::test] + async fn no_receipts_middle_block() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 + 
encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + // no receipt for block 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_BLOCK_NO_TRANSACTIONS); + // one receipt for block 3 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(4, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); + assert!(receipts[2].is_empty()); + assert_eq!(op_mainnet_receipt_block_3().receipt, receipts[3][0].clone().unwrap()); + } + + #[tokio::test] + async fn two_receipts_same_block() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + // two receipts for block 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + // one receipt for block 3 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(5, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, 
receipts[2][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, receipts[2][1].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_3().receipt, receipts[3][0].clone().unwrap()); + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index f4be57f9c..8d31358d9 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -66,8 +66,6 @@ revm-primitives = { workspace = true, features = ["arbitrary"] } nybbles = { workspace = true, features = ["arbitrary"] } alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } - -arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true proptest-derive.workspace = true @@ -109,7 +107,6 @@ zstd-codec = ["dep:zstd"] clap = ["dep:clap"] optimism = [ "reth-codecs/optimism", - "revm-primitives/optimism", "reth-ethereum-forks/optimism", "revm/optimism", ] diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 838edd620..2b146245e 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,6 +21,7 @@ pub mod providers; pub use providers::{ DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider, HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory, + StaticFileWriter, }; #[cfg(any(test, feature = "test-utils"))] From a7d8da5a27b8cfba128236afb3cf949012315cc1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 May 2024 12:04:56 +0200 Subject: [PATCH 229/250] feat(grafana): State & History panel (#8144) --- etc/grafana/dashboards/reth-state-growth.json | 1735 +++++++++++++++++ 1 file changed, 1735 insertions(+) create mode 100644 etc/grafana/dashboards/reth-state-growth.json diff --git a/etc/grafana/dashboards/reth-state-growth.json b/etc/grafana/dashboards/reth-state-growth.json new file mode 100644 index 
000000000..f6a7bbf84 --- /dev/null +++ b/etc/grafana/dashboards/reth-state-growth.json @@ -0,0 +1,1735 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.1.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Ethereum state growth", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 0 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, 
+ "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 0 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 0 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, 
+ "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 0 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 0 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + 
"lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 0 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 7, + "panels": [], + "title": "State", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 4 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "Account", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Storage", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": 
"Bytecodes", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval])) + avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval])) + avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 4 + }, + "id": 13, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": 
"sum(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"})", + "instant": false, + "interval": "$interval", + "legendFormat": "Account", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Storage", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Bytecodes", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=~\"PlainAccountState|PlainStorageState|Bytecodes\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "State Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Account State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 14 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + 
"showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Storage State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": 
"__auto", + "range": true, + "refId": "A" + } + ], + "title": "Bytecodes Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 8, + "panels": [], + "title": "History", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 12, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "Headers", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Receipts", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Transactions", + "range": true, + "refId": "C" + } + ], + "title": "History Growth (interval = ${interval})", + "transformations": [ + { + "id": "calculateField", + "options": { + "binary": { + "left": "Headers", + "reducer": "sum", + "right": "Receipts" + }, + "mode": "reduceRow", + "reduce": { + "include": [ + "Headers", + "Receipts", + "Transactions" + ], + "reducer": "sum" + } + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 14, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"})", + "instant": false, + "interval": "$interval", + "legendFormat": "Headers", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Receipts", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Transactions", + "range": true, + "refId": "C" + } + ], + "title": "History Size", + "transformations": [ + { + "id": "calculateField", + "options": { + "mode": "reduceRow", + "reduce": { + "include": [ + "Headers", + "Receipts", + "Transactions" + ], + "reducer": "sum" + } + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + 
"defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 45 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Headers Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + 
"legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 45 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Receipts Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + 
"mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 55 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions Growth (interval = ${interval})", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "10m", + "value": "10m" + }, + "hide": 0, + "includeAll": false, + "label": "Interval", + "multi": false, + "name": "interval", + "options": [ + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": true, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + 
"query": "5m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Reth - State & History", + "uid": "cab0fcc6-1c33-478c-9675-38bc1af5de82", + "version": 1, + "weekStart": "" + } \ No newline at end of file From 8954ffb42333a0966d509ae470558a5d96396dbc Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 May 2024 12:37:17 +0200 Subject: [PATCH 230/250] chore(grafana): dashboard names like public (#8148) --- etc/grafana/dashboards/overview.json | 2 +- etc/grafana/dashboards/reth-discovery.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 40c120e0f..eacc3a25c 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -8511,7 +8511,7 @@ }, "timepicker": {}, "timezone": "", - "title": "reth", + "title": "Reth", "uid": "2k8BXz24x", "version": 1, "weekStart": "" diff --git a/etc/grafana/dashboards/reth-discovery.json b/etc/grafana/dashboards/reth-discovery.json index 787913e65..037d6b3bf 100644 --- a/etc/grafana/dashboards/reth-discovery.json +++ b/etc/grafana/dashboards/reth-discovery.json @@ -1124,7 +1124,7 @@ }, "timepicker": {}, "timezone": "", - "title": "reth - discovery", + "title": "Reth - Peer Discovery", "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", "version": 1, "weekStart": "" From 6a6fe4cec229f646ddbf2e1b1c0a46ec5aa02d84 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 May 2024 13:46:56 +0200 Subject: [PATCH 231/250] fix(discv5): update metrics wrt unverifiable enrs (#8149) Co-authored-by: Alexey Shekhirin --- crates/net/discv5/src/lib.rs | 2 + crates/net/discv5/src/metrics.rs | 38 +++++--- etc/grafana/dashboards/reth-discovery.json | 91 ++++++++++++++++--- etc/grafana/dashboards/reth-state-growth.json | 22 +++++ 4 files changed, 127 insertions(+), 26 
deletions(-) diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 826556fb0..ffa3c9caf 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -273,6 +273,8 @@ impl Discv5 { "discovered unverifiable enr, source socket doesn't match socket advertised in ENR" ); + self.metrics.discovered_peers.increment_unverifiable_enrs_raw_total(1); + self.on_discovered_peer(&enr, socket) } _ => None diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs index 7bd3572f7..d58ed66e0 100644 --- a/crates/net/discv5/src/metrics.rs +++ b/crates/net/discv5/src/metrics.rs @@ -21,7 +21,7 @@ pub struct DiscoveredPeersMetrics { // Kbuckets //////////////////////////////////////////////////////////////////////////////////////////////// /// Total peers currently in [`discv5::Discv5`]'s kbuckets. - total_kbucket_peers_raw: Gauge, + kbucket_peers_raw_total: Gauge, /// Total discovered peers that are inserted into [`discv5::Discv5`]'s kbuckets. /// /// This is a subset of the total established sessions, in which all peers advertise a udp @@ -29,58 +29,72 @@ pub struct DiscoveredPeersMetrics { /// it into [`discv5::Discv5`]'s kbuckets and will hence be included in queries. /// /// Note: the definition of 'discovered' is not exactly synonymous in `reth_discv4::Discv4`. - total_inserted_kbucket_peers_raw: Counter, + inserted_kbucket_peers_raw_total: Counter, //////////////////////////////////////////////////////////////////////////////////////////////// // Sessions //////////////////////////////////////////////////////////////////////////////////////////////// /// Total peers currently connected to [`discv5::Discv5`]. - total_sessions_raw: Gauge, + sessions_raw_total: Gauge, /// Total number of sessions established by [`discv5::Discv5`]. 
- total_established_sessions_raw: Counter, + established_sessions_raw_total: Counter, /// Total number of sessions established by [`discv5::Discv5`], with peers that don't advertise /// a socket which is reachable from the local node in their node record. /// /// These peers can't make it into [`discv5::Discv5`]'s kbuckets, and hence won't be part of /// queries (neither shared with peers in NODES responses, nor queried for peers with FINDNODE /// requests). - total_established_sessions_unreachable_enr: Counter, + established_sessions_unreachable_enr_total: Counter, /// Total number of sessions established by [`discv5::Discv5`], that pass configured /// [`filter`](crate::filter) rules. - total_established_sessions_custom_filtered: Counter, + established_sessions_custom_filtered_total: Counter, + /// Total number of unverifiable ENRs discovered by [`discv5::Discv5`]. + /// + /// These are peers that fail [`discv5::Discv5`] session establishment, because the UDP socket + /// they're making a connection from doesn't match the UDP socket advertised in their ENR. + /// These peers will be denied a session (and hence can't make it into kbuckets) until they + /// have update their ENR, to reflect their actual UDP socket. + unverifiable_enrs_raw_total: Counter, } impl DiscoveredPeersMetrics { /// Sets current total number of peers in [`discv5::Discv5`]'s kbuckets. pub fn set_total_kbucket_peers(&self, num: usize) { - self.total_kbucket_peers_raw.set(num as f64) + self.kbucket_peers_raw_total.set(num as f64) } /// Increments the number of kbucket insertions in [`discv5::Discv5`]. pub fn increment_kbucket_insertions(&self, num: u64) { - self.total_inserted_kbucket_peers_raw.increment(num) + self.inserted_kbucket_peers_raw_total.increment(num) } /// Sets current total number of peers connected to [`discv5::Discv5`]. 
pub fn set_total_sessions(&self, num: usize) { - self.total_sessions_raw.set(num as f64) + self.sessions_raw_total.set(num as f64) } /// Increments number of sessions established by [`discv5::Discv5`]. pub fn increment_established_sessions_raw(&self, num: u64) { - self.total_established_sessions_raw.increment(num) + self.established_sessions_raw_total.increment(num) } /// Increments number of sessions established by [`discv5::Discv5`], with peers that don't have /// a reachable node record. pub fn increment_established_sessions_unreachable_enr(&self, num: u64) { - self.total_established_sessions_unreachable_enr.increment(num) + self.established_sessions_unreachable_enr_total.increment(num) } /// Increments number of sessions established by [`discv5::Discv5`], that pass configured /// [`filter`](crate::filter) rules. pub fn increment_established_sessions_filtered(&self, num: u64) { - self.total_established_sessions_custom_filtered.increment(num) + self.established_sessions_custom_filtered_total.increment(num) + } + + /// Increments number of unverifiable ENRs discovered by [`discv5::Discv5`]. These are peers + /// that fail session establishment because their advertised UDP socket doesn't match the + /// socket they are making the connection from. 
+ pub fn increment_unverifiable_enrs_raw_total(&self, num: u64) { + self.unverifiable_enrs_raw_total.increment(num) } } diff --git a/etc/grafana/dashboards/reth-discovery.json b/etc/grafana/dashboards/reth-discovery.json index 037d6b3bf..4a1ef344c 100644 --- a/etc/grafana/dashboards/reth-discovery.json +++ b/etc/grafana/dashboards/reth-discovery.json @@ -52,6 +52,7 @@ } ] }, + "description": "Devp2p peer discovery protocols", "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, @@ -578,7 +579,7 @@ { "id": "color", "value": { - "fixedColor": "purple", + "fixedColor": "#9b73d6", "mode": "fixed" } } @@ -628,7 +629,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_discv5_total_kbucket_peers_raw{instance=\"$instance\"}", + "expr": "reth_discv5_kbucket_peers_raw_total{instance=\"$instance\"}", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -644,7 +645,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_discv5_total_sessions_raw{instance=\"$instance\"}", + "expr": "reth_discv5_sessions_raw_total{instance=\"$instance\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -663,7 +664,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. 
These peers get passed up the node, to attempt an RLPx connection.\n\n", + "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. These peers get passed up the node, to attempt an RLPx connection.\n\nSessions will succeed to peers that advertise no UDP socket in their ENR. This allows peers to discover their reachable socket. On the other hand, for DoS protection, peers that advertise a different socket than the socket from which they make the connection, are denied a sigp/discv5 session. These peers have an unverifiable ENR. The peers are passed to RLPx nonetheless (some EL implementations of discv5 are more lax about ENR and source socket matching). 
", "fieldConfig": { "defaults": { "color": { @@ -728,7 +729,7 @@ { "id": "color", "value": { - "fixedColor": "purple", + "fixedColor": "light-green", "mode": "fixed" } } @@ -743,7 +744,7 @@ { "id": "color", "value": { - "fixedColor": "super-light-red", + "fixedColor": "#9958f4", "mode": "fixed" } } @@ -778,6 +779,36 @@ } } ] + }, + { + "matcher": { + "id": "byName", + "options": "Session Establishment Failed (unverifiable ENR)" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8ab8ff", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Failed Session Establishments (unverifiable ENR)" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8ab8ff", + "mode": "fixed" + } + } + ] } ] }, @@ -808,7 +839,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_discv5_total_inserted_kbucket_peers_raw{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_inserted_kbucket_peers_raw_total{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": false, "instant": false, @@ -824,7 +855,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -841,7 +872,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_unreachable_enr{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_established_sessions_unreachable_enr_total{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -858,7 +889,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": 
"rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", + "expr": "rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_established_sessions_custom_filtered_total{instance=\"$instance\"}[$__rate_interval])", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -867,6 +898,23 @@ "range": true, "refId": "D", "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_unverifiable_enrs_raw_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Session Establishments (unverifiable ENR)", + "range": true, + "refId": "E", + "useBackend": false } ], "title": "Peer Churn", @@ -942,7 +990,7 @@ { "id": "color", "value": { - "fixedColor": "purple", + "fixedColor": "#b677d9", "mode": "fixed" } } @@ -957,7 +1005,7 @@ { "id": "color", "value": { - "fixedColor": "super-light-red", + "fixedColor": "light-green", "mode": "fixed" } } @@ -977,6 +1025,21 @@ } } ] + }, + { + "matcher": { + "id": "byName", + "options": "OP EL" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8AB8FF", + "mode": "fixed" + } + } + ] } ] }, @@ -1074,7 +1137,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval]))", + "expr": 
"(rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_unverifiable_enrs_raw_total{instance=\"$instance\"}[$__rate_interval])) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval]))", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, @@ -1125,7 +1188,7 @@ "timepicker": {}, "timezone": "", "title": "Reth - Peer Discovery", - "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", + "uid": "fd2d69b5-ca32-45d0-946e-c00ddcd7052c", "version": 1, "weekStart": "" } \ No newline at end of file diff --git a/etc/grafana/dashboards/reth-state-growth.json b/etc/grafana/dashboards/reth-state-growth.json index f6a7bbf84..35077706e 100644 --- a/etc/grafana/dashboards/reth-state-growth.json +++ b/etc/grafana/dashboards/reth-state-growth.json @@ -1652,6 +1652,28 @@ "tags": [], "templating": { "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "query_result(reth_info)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "query_result(reth_info)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\\\"([^\\\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, { "current": { "selected": true, From ad00e83e6240334e90f9f0fb84a8dca0c6d5be09 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 May 2024 13:56:28 +0200 Subject: [PATCH 232/250] chore: move dashmap to workspace dep (#8153) --- Cargo.toml | 1 + crates/storage/libmdbx-rs/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dca4a1eb8..b45009795 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -311,6 +311,7 @@ aquamarine = "0.5" bytes = "1.5" bitflags = "2.4" clap = "4" +dashmap = "5.5" derive_more = "0.99.17" fdlimit = "0.3.0" eyre = "0.6" diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 2330b6f79..2042cd896 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -22,7 +22,7 @@ indexmap = "2" libc = "0.2" parking_lot.workspace = true thiserror.workspace = true -dashmap = { version = "5.5.3", features = ["inline"], optional = true } +dashmap = { workspace = true, features = ["inline"], optional = true } tracing.workspace = true ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 526cbdaa5..672f6a7fc 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -40,7 +40,7 @@ auto_impl.workspace = true itertools.workspace = true pin-project.workspace = true parking_lot.workspace = true -dashmap = { version = "5.5", features = ["inline"] } +dashmap = { workspace = true, features = ["inline"] } strum.workspace = true # test-utils From 04d2c10c46ecccb870752b56238dfe7cf310f0fd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 May 2024 15:18:40 +0200 Subject: [PATCH 233/250] chore: rm outdated executor types (#8157) --- crates/consensus/beacon/src/engine/sync.rs | 7 +- .../provider/src/test_utils/executor.rs | 71 ------------------- crates/storage/provider/src/test_utils/mod.rs | 2 - .../storage/provider/src/traits/executor.rs | 68 ------------------ crates/storage/provider/src/traits/mod.rs | 3 - 5 files changed, 2 insertions(+), 149 deletions(-) delete mode 100644 crates/storage/provider/src/test_utils/executor.rs delete mode 100644 crates/storage/provider/src/traits/executor.rs diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 
261b6874f..f73c4b54e 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -440,8 +440,8 @@ mod tests { Header, PruneModes, SealedHeader, MAINNET, }; use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, - BundleStateWithReceipts, StaticFileProviderFactory, + test_utils::create_test_provider_factory_with_chain_spec, BundleStateWithReceipts, + StaticFileProviderFactory, }; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_static_file::StaticFileProducer; @@ -492,9 +492,6 @@ mod tests { fn build(self, chain_spec: Arc) -> Pipeline>> { reth_tracing::init_test_tracing(); - let executor_factory = TestExecutorFactory::default(); - executor_factory.extend(self.executor_results); - // Setup pipeline let (tip_tx, _tip_rx) = watch::channel(B256::default()); let mut pipeline = Pipeline::builder() diff --git a/crates/storage/provider/src/test_utils/executor.rs b/crates/storage/provider/src/test_utils/executor.rs deleted file mode 100644 index 8ac963e93..000000000 --- a/crates/storage/provider/src/test_utils/executor.rs +++ /dev/null @@ -1,71 +0,0 @@ -use crate::{ - bundle_state::BundleStateWithReceipts, BlockExecutor, ExecutorFactory, PrunableBlockExecutor, - StateProvider, -}; -use parking_lot::Mutex; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, U256}; -use std::sync::Arc; -/// Test executor with mocked result. 
-#[derive(Debug)] -pub struct TestExecutor(pub Option); - -impl BlockExecutor for TestExecutor { - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - _block: &BlockWithSenders, - _total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - if self.0.is_none() { - return Err(BlockExecutionError::UnavailableForTest) - } - Ok(()) - } - - fn execute_transactions( - &mut self, - _block: &BlockWithSenders, - _total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - Err(BlockExecutionError::UnavailableForTest) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - self.0.clone().unwrap_or_default() - } - - fn size_hint(&self) -> Option { - None - } -} - -impl PrunableBlockExecutor for TestExecutor { - fn set_tip(&mut self, _tip: BlockNumber) {} - - fn set_prune_modes(&mut self, _prune_modes: PruneModes) {} -} - -/// Executor factory with pre-set execution results. -#[derive(Clone, Debug, Default)] -pub struct TestExecutorFactory { - exec_results: Arc>>, -} - -impl TestExecutorFactory { - /// Extend the mocked execution results - pub fn extend(&self, results: Vec) { - self.exec_results.lock().extend(results); - } -} - -impl ExecutorFactory for TestExecutorFactory { - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - _sp: SP, - ) -> Box::Error> + 'a> { - let exec_res = self.exec_results.lock().pop(); - Box::new(TestExecutor(exec_res)) - } -} diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index f4a5626f6..2f5462309 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -8,12 +8,10 @@ use std::sync::Arc; pub mod blocks; mod events; -mod executor; mod mock; mod noop; pub use events::TestCanonStateSubscriptions; -pub use executor::{TestExecutor, TestExecutorFactory}; pub use mock::{ExtendedAccount, MockEthProvider}; pub use noop::NoopProvider; diff --git 
a/crates/storage/provider/src/traits/executor.rs b/crates/storage/provider/src/traits/executor.rs deleted file mode 100644 index f12d64169..000000000 --- a/crates/storage/provider/src/traits/executor.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! Executor Factory - -use crate::{bundle_state::BundleStateWithReceipts, StateProvider}; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, U256}; - -/// A factory capable of creating an executor with the given state provider. -pub trait ExecutorFactory: Send + Sync + 'static { - /// Executor with [`StateProvider`] - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box + 'a>; -} - -/// An executor capable of executing a block. -/// -/// This type is capable of executing (multiple) blocks by applying the state changes made by each -/// block. The final state of the executor can extracted using -/// [`Self::take_output_state`]. -pub trait BlockExecutor { - /// The error type returned by the executor. - type Error; - - /// Executes the entire block and verifies: - /// - receipts (receipts root) - /// - /// This will update the state of the executor with the changes made by the block. - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), Self::Error>; - - /// Runs the provided transactions and commits their state to the run-time database. - /// - /// The returned [BundleStateWithReceipts] can be used to persist the changes to disk, and - /// contains the changes made by each transaction. - /// - /// The changes in [BundleStateWithReceipts] have a transition ID associated with them: there is - /// one transition ID for each transaction (with the first executed tx having transition ID - /// 0, and so on). - /// - /// The second returned value represents the total gas used by this block of transactions. 
- /// - /// See [execute_and_verify_receipt](BlockExecutor::execute_and_verify_receipt) for more - /// details. - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error>; - - /// Return bundle state. This is output of executed blocks. - fn take_output_state(&mut self) -> BundleStateWithReceipts; - - /// Returns the size hint of current in-memory changes. - fn size_hint(&self) -> Option; -} - -/// A [BlockExecutor] capable of in-memory pruning of the data that will be written to the database. -pub trait PrunableBlockExecutor: BlockExecutor { - /// Set tip - highest known block number. - fn set_tip(&mut self, tip: BlockNumber); - - /// Set prune modes. - fn set_prune_modes(&mut self, prune_modes: PruneModes); -} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 6d78cf583..9aa96bccf 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -48,9 +48,6 @@ pub use transactions::{TransactionsProvider, TransactionsProviderExt}; mod withdrawals; pub use withdrawals::WithdrawalsProvider; -mod executor; -pub use executor::{BlockExecutor, ExecutorFactory, PrunableBlockExecutor}; - mod chain; pub use chain::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, From 72e5122e73ef981a1cd95d97aac1f1cf7de1f691 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 8 May 2024 15:21:16 +0200 Subject: [PATCH 234/250] fix: prevents potential arithmetic underflow (#8156) --- crates/blockchain-tree/src/chain.rs | 3 ++- crates/interfaces/src/blockchain_tree/error.rs | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 637ea52e7..db4b4627a 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -119,7 +119,8 @@ impl AppendableChain { DB: 
Database + Clone, E: BlockExecutorProvider, { - let parent_number = block.number - 1; + let parent_number = + block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; let parent = self.blocks().get(&parent_number).ok_or( BlockchainTreeError::BlockNumberNotFoundInChain { block_number: parent_number }, )?; diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index e9cdb8714..a98d76501 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -47,6 +47,9 @@ pub enum BlockchainTreeError { /// The block hash of the block that failed to buffer. block_hash: BlockHash, }, + /// Thrown when trying to access genesis parent. + #[error("genesis block has no parent")] + GenesisBlockHasNoParent, } /// Canonical Errors @@ -318,7 +321,8 @@ impl InsertBlockErrorKind { BlockchainTreeError::CanonicalChain { .. } | BlockchainTreeError::BlockNumberNotFoundInChain { .. } | BlockchainTreeError::BlockHashNotFoundInChain { .. } | - BlockchainTreeError::BlockBufferingFailed { .. } => false, + BlockchainTreeError::BlockBufferingFailed { .. 
} | + BlockchainTreeError::GenesisBlockHasNoParent => false, } } InsertBlockErrorKind::Provider(_) | InsertBlockErrorKind::Internal(_) => { From 18b7edb1910f211cf09dcbe9ad8a687ba69b3da5 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 8 May 2024 14:42:14 +0100 Subject: [PATCH 235/250] fix(examples): ExEx rollup reverts (#8151) --- examples/exex/rollup/src/db.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/examples/exex/rollup/src/db.rs b/examples/exex/rollup/src/db.rs index 39c2b418b..201272a00 100644 --- a/examples/exex/rollup/src/db.rs +++ b/examples/exex/rollup/src/db.rs @@ -114,13 +114,13 @@ impl Database { if reverts.accounts.len() > 1 { eyre::bail!("too many blocks in account reverts"); } - for (address, account) in - reverts.accounts.first().ok_or(eyre::eyre!("no account reverts"))? - { - tx.execute( - "INSERT INTO account_revert (block_number, address, data) VALUES (?, ?, ?) ON CONFLICT(block_number, address) DO UPDATE SET data = excluded.data", - (block.header.number.to_string(), address.to_string(), serde_json::to_string(account)?), - )?; + if let Some(account_reverts) = reverts.accounts.into_iter().next() { + for (address, account) in account_reverts { + tx.execute( + "INSERT INTO account_revert (block_number, address, data) VALUES (?, ?, ?) ON CONFLICT(block_number, address) DO UPDATE SET data = excluded.data", + (block.header.number.to_string(), address.to_string(), serde_json::to_string(&account)?), + )?; + } } for PlainStorageChangeset { address, wipe_storage, storage } in changeset.storage { @@ -139,19 +139,19 @@ impl Database { if reverts.storage.len() > 1 { eyre::bail!("too many blocks in storage reverts"); } - for PlainStorageRevert { address, wiped, storage_revert } in - reverts.storage.into_iter().next().ok_or(eyre::eyre!("no storage reverts"))? 
- { - let storage = storage_revert - .into_iter() - .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) - .collect::>(); - let wiped_storage = if wiped { get_storages(&tx, address)? } else { Vec::new() }; - for (key, data) in StorageRevertsIter::new(storage, wiped_storage) { - tx.execute( + if let Some(storage_reverts) = reverts.storage.into_iter().next() { + for PlainStorageRevert { address, wiped, storage_revert } in storage_reverts { + let storage = storage_revert + .into_iter() + .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) + .collect::>(); + let wiped_storage = if wiped { get_storages(&tx, address)? } else { Vec::new() }; + for (key, data) in StorageRevertsIter::new(storage, wiped_storage) { + tx.execute( "INSERT INTO storage_revert (block_number, address, key, data) VALUES (?, ?, ?, ?) ON CONFLICT(block_number, address, key) DO UPDATE SET data = excluded.data", (block.header.number.to_string(), address.to_string(), key.to_string(), data.to_string()), )?; + } } } From bdb8238d79c383033a68cbeef8809c37a481e303 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 May 2024 15:45:47 +0200 Subject: [PATCH 236/250] chore: remove type aliases (#8155) --- crates/revm/src/database.rs | 11 ++--------- crates/rpc/rpc/src/debug.rs | 4 ++-- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 36a7ec96f..93a22a068 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,19 +1,12 @@ -use reth_interfaces::RethError; use reth_primitives::{Address, B256, KECCAK_EMPTY, U256}; use reth_provider::{ProviderError, StateProvider}; use revm::{ - db::{CacheDB, DatabaseRef}, + db::DatabaseRef, primitives::{AccountInfo, Bytecode}, - Database, StateDBBox, + Database, }; use std::ops::{Deref, DerefMut}; -/// SubState of database. Uses revm internal cache with binding to reth StateProvider trait. -pub type SubState = CacheDB>; - -/// State boxed database with reth Error. 
-pub type RethStateDBBox<'a> = StateDBBox<'a, RethError>; - /// Wrapper around StateProvider that implements revm database trait #[derive(Debug, Clone)] pub struct StateProviderDatabase(pub DB); diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b21adf520..ebc52877d 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -17,7 +17,7 @@ use reth_primitives::{ use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProviderBox, TransactionVariant, }; -use reth_revm::database::{StateProviderDatabase, SubState}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_types::{ trace::geth::{ @@ -517,7 +517,7 @@ where &self, opts: GethDebugTracingOptions, env: EnvWithHandlerCfg, - db: &mut SubState, + db: &mut CacheDB>, transaction_context: Option, ) -> EthResult<(GethTrace, revm_primitives::State)> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; From 331d1a0c6a7c9e768649ee051ae56963b946fa09 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 May 2024 10:26:48 -0400 Subject: [PATCH 237/250] feat(ci): add op-reth to release workflow (#8132) --- .github/workflows/docker.yml | 12 ++++++++-- .github/workflows/release.yml | 20 +++++++++------- DockerfileOp.cross | 15 ++++++++++++ Makefile | 44 +++++++++++++++++++++++++++++++++++ 4 files changed, 80 insertions(+), 11 deletions(-) create mode 100644 DockerfileOp.cross diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1fb89f7c1..20ae6644b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -11,8 +11,10 @@ on: env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth + OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth + OP_DOCKER_IMAGE_NAME: ghcr.io/${{ 
github.repository_owner }}/op-reth DOCKER_USERNAME: ${{ github.actor }} jobs: @@ -36,9 +38,15 @@ jobs: run: | docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 docker buildx create --use --name cross-builder - - name: Build and push image, tag as "latest" + - name: Build and push reth image, tag as "latest" if: ${{ contains(github.event.ref, 'beta') }} run: make PROFILE=maxperf docker-build-push-latest - - name: Build and push image + - name: Build and push reth image if: ${{ ! contains(github.event.ref, 'beta') }} run: make PROFILE=maxperf docker-build-push + - name: Build and push op-reth image, tag as "latest" + if: ${{ contains(github.event.ref, 'beta') }} + run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest + - name: Build and push op-reth image + if: ${{ ! contains(github.event.ref, 'beta') }} + run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 55ce0843f..8562da807 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,6 +10,7 @@ on: env: REPO_NAME: ${{ github.repository_owner }}/reth + OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth IMAGE_NAME: ${{ github.repository_owner }}/reth CARGO_TERM_COLOR: always @@ -30,6 +31,7 @@ jobs: needs: extract-version strategy: matrix: + build: [{command: build, binary: reth}, {command: build-op, binary: op-reth}] include: - target: x86_64-unknown-linux-gnu os: ubuntu-20.04 @@ -63,12 +65,12 @@ jobs: echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-os-version)" >> $GITHUB_ENV - name: Build Reth - run: make PROFILE=${{ matrix.profile }} build-${{ matrix.target }} + run: make PROFILE=${{ matrix.profile }} ${{ matrix.build.command }}-${{ matrix.target }} - name: Move binary run: | mkdir artifacts [[ "${{ matrix.target }}" == *windows* ]] && ext=".exe" - 
mv "target/${{ matrix.target }}/${{ matrix.profile }}/reth${ext}" ./artifacts + mv "target/${{ matrix.target }}/${{ matrix.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - name: Configure GPG and create artifacts env: @@ -78,22 +80,22 @@ jobs: export GPG_TTY=$(tty) echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import cd artifacts - tar -czf reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz reth* - echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz ${{ matrix.build.binary }}* + echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz mv *tar.gz* .. shell: bash - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz - path: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz - name: Upload signature uses: actions/upload-artifact@v4 with: - name: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc - path: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc draft-release: name: draft release @@ -184,7 +186,7 @@ jobs: ENDBODY ) assets=() - for asset in 
./reth-*.tar.gz*; do + for asset in ./*reth-*.tar.gz*; do assets+=("$asset/$asset") done tag_name="${{ env.VERSION }}" diff --git a/DockerfileOp.cross b/DockerfileOp.cross new file mode 100644 index 000000000..47606a828 --- /dev/null +++ b/DockerfileOp.cross @@ -0,0 +1,15 @@ +# This image is meant to enable cross-architecture builds. +# It assumes the reth binary has already been compiled for `$TARGETPLATFORM` and is +# locatable in `./dist/bin/$TARGETARCH` +FROM --platform=$TARGETPLATFORM ubuntu:22.04 + +LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth +LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" + +# Filled by docker buildx +ARG TARGETARCH + +COPY ./dist/bin/$TARGETARCH/op-reth /usr/local/bin/op-reth + +EXPOSE 30303 30303/udp 9001 8545 8546 +ENTRYPOINT ["/usr/local/bin/op-reth"] diff --git a/Makefile b/Makefile index 5ac3bb468..f19a3cd8c 100644 --- a/Makefile +++ b/Makefile @@ -227,6 +227,50 @@ define docker_build_push --push endef +##@ Optimism docker + +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --driver docker-container --name cross-builder` +.PHONY: op-docker-build-push +op-docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag. + $(call op_docker_build_push,$(GIT_TAG),$(GIT_TAG)) + +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --driver docker-container --name cross-builder` +.PHONY: op-docker-build-push-latest +op-docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`. + $(call op_docker_build_push,$(GIT_TAG),latest) + +# Note: This requires a buildx builder with emulation support. 
For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --name cross-builder` +.PHONY: op-docker-build-push-nightly +op-docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`. + $(call op_docker_build_push,$(GIT_TAG)-nightly,latest-nightly) + +# Create a cross-arch Docker image with the given tags and push it +define op_docker_build_push + $(MAKE) op-build-x86_64-unknown-linux-gnu + mkdir -p $(BIN_DIR)/amd64 + cp $(BUILD_PATH)/x86_64-unknown-linux-gnu/$(PROFILE)/op-reth $(BIN_DIR)/amd64/op-reth + + $(MAKE) op-build-aarch64-unknown-linux-gnu + mkdir -p $(BIN_DIR)/arm64 + cp $(BUILD_PATH)/aarch64-unknown-linux-gnu/$(PROFILE)/op-reth $(BIN_DIR)/arm64/op-reth + + docker buildx build --file ./DockerfileOp.cross . \ + --platform linux/amd64,linux/arm64 \ + --tag $(DOCKER_IMAGE_NAME):$(1) \ + --tag $(DOCKER_IMAGE_NAME):$(2) \ + --provenance=false \ + --push +endef + ##@ Other .PHONY: clean From d852f7f012c1811a3d1197fd9c5fc63be5ec8233 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 May 2024 16:37:45 +0200 Subject: [PATCH 238/250] chore(op): simplify op cli args (#8146) --- bin/reth/src/commands/import_op.rs | 48 ++++++------------------ bin/reth/src/commands/import_receipts.rs | 5 ++- 2 files changed, 15 insertions(+), 38 deletions(-) diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index 8ca1baf5b..5362b45b0 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -1,8 +1,9 @@ -//! Command that initializes the node by importing a chain from a file. +//! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a +//! file. 
use crate::{ args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + utils::{genesis_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, }, commands::import::{build_import_pipeline, load_config}, @@ -20,7 +21,7 @@ use reth_downloaders::file_client::{ use reth_node_core::init::init_genesis; -use reth_primitives::{hex, stage::StageId, ChainSpec, PruneModes, TxHash}; +use reth_primitives::{hex, stage::StageId, PruneModes, TxHash}; use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -44,18 +45,6 @@ pub struct ImportOpCommand { #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] datadir: MaybePlatformPath, - /// The chain this node is running. - /// - /// Possible values are either a built-in chain or the path to a chain specification file. - #[arg( - long, - value_name = "CHAIN_OR_PATH", - long_help = chain_help(), - default_value = SUPPORTED_CHAINS[0], - value_parser = genesis_value_parser - )] - chain: Arc, - /// Chunk byte length. 
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, @@ -85,8 +74,10 @@ impl ImportOpCommand { "Chunking chain import" ); + let chain_spec = genesis_value_parser(SUPPORTED_CHAINS[0])?; + // add network name to data dir - let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + let data_dir = self.datadir.unwrap_or_chain_default(chain_spec.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = load_config(config_path.clone())?; @@ -101,15 +92,16 @@ impl ImportOpCommand { info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; + ProviderFactory::new(db.clone(), chain_spec.clone(), data_dir.static_files())?; - debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); + debug!(target: "reth::cli", chain=%chain_spec.chain, genesis=?chain_spec.genesis_hash(), "Initializing genesis"); init_genesis(provider_factory.clone())?; - let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); info!(target: "reth::cli", "Consensus engine initialized"); // open file @@ -256,21 +248,3 @@ pub fn is_duplicate(tx_hash: TxHash, block_number: u64) -> bool { } false } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_common_import_command_chain_args() { - for chain in SUPPORTED_CHAINS { - let args: ImportOpCommand = - ImportOpCommand::parse_from(["reth", "--chain", chain, "."]); - assert_eq!( - Ok(args.chain.chain), - chain.parse::(), - "failed to parse chain {chain}" - ); - } - } -} diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs index 8e06c3c03..2686bcf4e 100644 --- 
a/bin/reth/src/commands/import_receipts.rs +++ b/bin/reth/src/commands/import_receipts.rs @@ -55,7 +55,10 @@ pub struct ImportReceiptsCommand { #[command(flatten)] db: DatabaseArgs, - /// The path to a receipts file for import. + /// The path to a receipts file for import. File must use `HackReceiptCodec` (used for + /// exporting OP chain segment below Bedrock block via testinprod/op-geth). + /// + /// #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] path: PathBuf, } From db868208f300d1f9d794950d69965bcb0621117e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 May 2024 21:34:59 +0200 Subject: [PATCH 239/250] feat: introduce statewriter trait (#8160) --- .../commands/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- bin/reth/src/commands/import_receipts.rs | 2 +- crates/node-core/src/init.rs | 2 +- crates/stages/src/stages/execution.rs | 3 ++- .../bundle_state_with_receipts.rs | 12 ++++------- .../src/providers/database/provider.rs | 4 ++-- crates/storage/provider/src/traits/mod.rs | 2 +- crates/storage/provider/src/traits/state.rs | 21 ++++++++++++++++++- 9 files changed, 33 insertions(+), 17 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 008530c53..f51426015 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -23,7 +23,7 @@ use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec, Receipts use reth_provider::{ AccountExtReader, BundleStateWithReceipts, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, - StaticFileProviderFactory, StorageReader, + StateWriter, StaticFileProviderFactory, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_tasks::TaskExecutor; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs 
b/bin/reth/src/commands/debug_cmd/merkle.rs index 3d94a3a43..f452e2e52 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -24,7 +24,7 @@ use reth_network_api::NetworkInfo; use reth_primitives::{fs, stage::StageCheckpoint, BlockHashOrNumber, ChainSpec, PruneModes}; use reth_provider::{ BlockNumReader, BlockWriter, BundleStateWithReceipts, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, ProviderFactory, + OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs index 2686bcf4e..e6aae327a 100644 --- a/bin/reth/src/commands/import_receipts.rs +++ b/bin/reth/src/commands/import_receipts.rs @@ -17,7 +17,7 @@ use reth_node_core::version::SHORT_VERSION; use reth_primitives::{stage::StageId, ChainSpec, StaticFileSegment}; use reth_provider::{ BundleStateWithReceipts, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, - StaticFileProviderFactory, StaticFileWriter, + StateWriter, StaticFileProviderFactory, StaticFileWriter, }; use tracing::{debug, error, info}; diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index b09e29e53..6d924b6b1 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -15,7 +15,7 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, - ProviderFactory, StageCheckpointWriter, StaticFileProviderFactory, + ProviderFactory, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, }; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; use serde::{Deserialize, Serialize}; diff --git 
a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 6d2eb2a5d..9d8cf6ac6 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -14,7 +14,8 @@ use reth_primitives::{ use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, BlockReader, BundleStateWithReceipts, Chain, DatabaseProviderRW, HeaderProvider, - LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, TransactionVariant, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter, StatsReader, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index a57f18f11..5f6d4af3f 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -1,4 +1,4 @@ -use crate::{providers::StaticFileProviderRWRefMut, StateChanges, StateReverts}; +use crate::{providers::StaticFileProviderRWRefMut, StateChanges, StateReverts, StateWriter}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, tables, @@ -309,14 +309,10 @@ impl BundleStateWithReceipts { // swap bundles std::mem::swap(&mut self.bundle, &mut other) } +} - /// Write the [BundleStateWithReceipts] to database and receipts to either database or static - /// files if `static_file_producer` is `Some`. It should be none if there is any kind of - /// pruning/filtering over the receipts. - /// - /// `omit_changed_check` should be set to true if bundle has some of its data detached. This - /// would make some original values not known. 
- pub fn write_to_storage( +impl StateWriter for BundleStateWithReceipts { + fn write_to_storage( self, tx: &TX, mut static_file_producer: Option>, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 428645f1a..6e07b7c46 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -9,8 +9,8 @@ use crate::{ Chain, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, - StageCheckpointReader, StateProviderBox, StatsReader, StorageReader, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, + StageCheckpointReader, StateProviderBox, StateWriter, StatsReader, StorageReader, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; use itertools::{izip, Itertools}; use reth_db::{ diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 9aa96bccf..c966cd9ef 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -36,7 +36,7 @@ pub use receipts::{ReceiptProvider, ReceiptProviderIdExt}; mod state; pub use state::{ BlockchainTreePendingStateProvider, BundleStateDataProvider, StateProvider, StateProviderBox, - StateProviderFactory, + StateProviderFactory, StateWriter, }; mod trie; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index b5251ca75..4cb74dec6 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -1,11 +1,16 @@ use super::AccountReader; -use crate::{BlockHashReader, BlockIdReader, BundleStateWithReceipts, StateRootProvider}; +use crate::{ 
+ providers::StaticFileProviderRWRefMut, BlockHashReader, BlockIdReader, BundleStateWithReceipts, + StateRootProvider, +}; use auto_impl::auto_impl; +use reth_db::transaction::{DbTx, DbTxMut}; use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, }; +use revm::db::OriginalValuesKnown; /// Type alias of boxed [StateProvider]. pub type StateProviderBox = Box; @@ -226,3 +231,17 @@ pub trait BundleStateDataProvider: Send + Sync { /// Needed to create state provider. fn canonical_fork(&self) -> BlockNumHash; } + +/// A helper trait for [BundleStateWithReceipts] to write state and receipts to storage. +pub trait StateWriter { + /// Write the data and receipts to the database or static files if `static_file_producer` is + /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
+ fn write_to_storage( + self, + tx: &TX, + static_file_producer: Option>, + is_value_known: OriginalValuesKnown, + ) -> ProviderResult<()> + where + TX: DbTxMut + DbTx; +} From dd7c0214702829c2b0e243286f5d0c2ca05d0874 Mon Sep 17 00:00:00 2001 From: Andrzej Sulkowski <111314156+andrzejSulkowski@users.noreply.github.com> Date: Wed, 8 May 2024 22:45:12 +0200 Subject: [PATCH 240/250] feat: feature gate rpc-types import for alloy conversions (#7963) Co-authored-by: Matthias Seitz --- Cargo.lock | 236 +++++++++++------------ crates/primitives/Cargo.toml | 10 +- crates/primitives/src/block.rs | 24 +-- crates/primitives/src/header.rs | 10 +- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/net.rs | 11 +- crates/primitives/src/serde_helper.rs | 3 - crates/primitives/src/transaction/mod.rs | 20 +- crates/primitives/src/withdrawal.rs | 5 +- crates/rpc/rpc-api/src/eth.rs | 11 +- crates/rpc/rpc/src/eth/api/server.rs | 15 +- crates/rpc/rpc/src/eth/api/state.rs | 6 +- 12 files changed, 170 insertions(+), 183 deletions(-) delete mode 100644 crates/primitives/src/serde_helper.rs diff --git a/Cargo.lock b/Cargo.lock index 164865acb..3e99592ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -222,7 +222,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-primitives", "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", @@ -490,7 +490,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy#0bb7604f186a78cdee911fa7fbc0ca36465a6902" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-primitives", "serde", @@ -591,7 +591,7 @@ version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", - "base64 0.22.1", + "base64 0.22.0", "futures-util", "futures-utils-wasm", "serde", @@ -628,7 +628,7 @@ dependencies = [ "arbitrary", "derive_arbitrary", "derive_more", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "nybbles", "proptest", "proptest-derive", @@ -660,48 +660,47 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version 
= "1.0.7" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -1028,9 +1027,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "backon" @@ -1085,9 +1084,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +checksum = 
"9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" [[package]] name = "base64ct" @@ -1300,7 +1299,7 @@ dependencies = [ "cfg-if", "dashmap", "fast-float", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "icu_normalizer", "indexmap 2.2.6", "intrusive-collections", @@ -1335,7 +1334,7 @@ checksum = "c055ef3cd87ea7db014779195bc90c6adfc35de4902e3b2fe587adecbd384578" dependencies = [ "boa_macros", "boa_profiler", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "thin-vec", ] @@ -1347,7 +1346,7 @@ checksum = "0cacc9caf022d92195c827a3e5bf83f96089d4bfaff834b359ac7b6be46e9187" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "indexmap 2.2.6", "once_cell", "phf", @@ -1559,9 +1558,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.96" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", @@ -1749,9 +1748,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" @@ -2328,7 +2327,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "lock_api", "once_cell", "parking_lot_core 0.9.10", @@ -2336,15 +2335,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = 
"7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "data-encoding-macro" -version = "0.1.15" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2352,9 +2351,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.13" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" dependencies = [ "data-encoding", "syn 1.0.109", @@ -3109,9 +3108,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" dependencies = [ "crc32fast", "miniz_oxide", @@ -3473,9 +3472,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", @@ -3488,7 +3487,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", ] [[package]] @@ -3497,7 +3496,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", ] [[package]] @@ -3735,7 +3734,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -3821,7 +3820,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.5.6", "tokio", "tower", "tower-service", @@ -4141,7 +4140,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "serde", ] @@ -4236,7 +4235,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2 0.5.6", "widestring", "windows-sys 0.48.0", "winreg 0.50.0", @@ -4269,12 +4268,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "is_terminal_polyfill" -version = "1.70.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" - [[package]] name = "itertools" version = "0.10.5" @@ -4319,9 +4312,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" +checksum = "c4b0e68d9af1f066c06d6e2397583795b912d78537d7d907c561e82c13d69fa1" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4337,9 +4330,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" 
+checksum = "92f254f56af1ae84815b9b1325094743dcf05b92abb5e94da2e81a35cff0cada" dependencies = [ "futures-channel", "futures-util", @@ -4361,9 +4354,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" +checksum = "274d68152c24aa78977243bb56f28d7946e6aa309945b37d33174a3f92d89a3a" dependencies = [ "anyhow", "async-trait", @@ -4387,9 +4380,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" +checksum = "ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" dependencies = [ "async-trait", "hyper 0.14.28", @@ -4407,9 +4400,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" +checksum = "2c326f9e95aeff7d707b2ffde72c22a52acc975ba1c48587776c02b90c4747a6" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.1.0", @@ -4420,9 +4413,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41" +checksum = "3b5bfbda5f8fb63f997102fd18f73e35e34c84c6dcdbdbbe72c6e48f6d2c959b" dependencies = [ "futures-util", "http 0.2.12", @@ -4444,9 +4437,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" +checksum = 
"3dc828e537868d6b12bbb07ec20324909a22ced6efca0057c825c3e1126b2c6d" dependencies = [ "anyhow", "beef", @@ -4457,9 +4450,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f448d8eacd945cc17b6c0b42c361531ca36a962ee186342a97cdb8fca679cd77" +checksum = "7cf8dcee48f383e24957e238240f997ec317ba358b4e6d2e8be3f745bcdabdb5" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4468,9 +4461,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.5" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070" +checksum = "32f00abe918bf34b785f87459b9205790e5361a3f7437adb50e928dc243f27eb" dependencies = [ "http 0.2.12", "jsonrpsee-client-transport", @@ -4558,9 +4551,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.154" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libffi" @@ -4694,9 +4687,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.44.2" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" +checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" dependencies = [ "either", "fnv", @@ -4705,7 +4698,6 @@ dependencies = [ "instant", "libp2p-core", "libp2p-identity", - "lru", "multistream-select", "once_cell", "rand 0.8.5", @@ -4843,7 +4835,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", ] [[package]] @@ -5331,9 +5323,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.45" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" dependencies = [ "autocfg", "num-integer", @@ -5354,9 +5346,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.19" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -5387,7 +5379,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 2.0.60", @@ -5592,7 +5584,7 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.22.1", + "base64 0.22.0", "serde", ] @@ -5604,9 +5596,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.10" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" dependencies = [ "memchr", "thiserror", @@ -6294,7 +6286,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", "memchr", ] @@ -6342,7 +6334,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ - "base64 0.22.1", + "base64 0.22.0", "bytes", "futures-core", "futures-util", @@ -7553,6 +7545,7 @@ dependencies = [ "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-trie", "arbitrary", "assert_matches", @@ -7576,7 +7569,6 @@ dependencies = [ "reth-codecs", "reth-ethereum-forks", "reth-network-types", - "reth-rpc-types", "revm", "revm-primitives", "roaring", @@ -8136,7 +8128,7 @@ dependencies = [ "derive_more", "dyn-clone", "enumn", - "hashbrown 0.14.5", + "hashbrown 0.14.3", "hex", "once_cell", "serde", @@ -8221,9 +8213,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.4" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26f4c25a604fcb3a1bcd96dd6ba37c93840de95de8198d94c0d571a74a804d1" +checksum = "a1c77081a55300e016cb86f2864415b7518741879db925b8d488a0ee0d2da6bf" dependencies = [ "bytemuck", "byteorder", @@ -8418,7 +8410,7 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.1", + "base64 0.22.0", "rustls-pki-types", ] @@ -8501,9 +8493,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ad2bbb0ae5100a07b7a6f2ed7ab5fd0045551a4c507989b7a620046ea3efdc" +checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" dependencies = [ "sdd", ] @@ -8642,9 +8634,9 
@@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.200" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" +checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" dependencies = [ "serde_derive", ] @@ -8660,9 +8652,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.200" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" +checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" dependencies = [ "proc-macro2", "quote", @@ -8715,11 +8707,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" +checksum = "2c85f8e96d1d6857f13768fcbd895fcb06225510022a2774ed8b5150581847b0" dependencies = [ - "base64 0.22.1", + "base64 0.22.0", "chrono", "hex", "indexmap 1.9.3", @@ -8733,9 +8725,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" +checksum = "c8b3a576c4eb2924262d5951a3b737ccaf16c931e39a2810c36f9a7e25575557" dependencies = [ "darling 0.20.8", "proc-macro2", @@ -8981,9 +8973,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", "windows-sys 0.52.0", @@ -9269,9 +9261,9 @@ 
checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-fuzz" -version = "5.1.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8224048089fb4c76b0569e76e00bf6cdaf06790eb5290e9582a0c485094e0a8" +checksum = "b139530208017f9d5a113784ed09cf1b8b22dee95eb99d51d89af1a3c2d6594e" dependencies = [ "serde", "test-fuzz-internal", @@ -9281,9 +9273,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "5.1.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43cd6c1a291bd5f843f5dfb813c2fd7ad8e38de06722a14eeb54636c983485cc" +checksum = "16e78ed8148311b6a02578dee5fd77600bf8805b77b2cb8382a9435348080985" dependencies = [ "bincode", "cargo_metadata", @@ -9292,9 +9284,9 @@ dependencies = [ [[package]] name = "test-fuzz-macro" -version = "5.1.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffbe4466c9f941baa7dd177856ebda245d08b2aa2e3b6890d6dd8c54d6ceebe" +checksum = "17f9bc8c69f276df24e4d1c082e52ea057544495916c4aa0708b82e47f55f364" dependencies = [ "darling 0.20.8", "itertools 0.12.1", @@ -9307,9 +9299,9 @@ dependencies = [ [[package]] name = "test-fuzz-runtime" -version = "5.1.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc507e8ea4887c091e1a57b65458c57b3a8fce1b6ed53afee77a174cfe41c17" +checksum = "2b657ccc932fde05dbac5c460bffa40809937adaa5558863fe8174526e1b3bc9" dependencies = [ "hex", "num-traits", @@ -9492,7 +9484,7 @@ dependencies = [ "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -9543,9 +9535,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -9554,6 +9546,7 @@ dependencies = [ "pin-project-lite", "slab", "tokio", + "tracing", ] [[package]] @@ -9906,11 +9899,12 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.93" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0e5d82932dfbf36df38de5df0cfe846d13430b3ae3fdc48b2e91ed692c8df7" +checksum = "8ad7eb6319ebadebca3dacf1f85a93bc54b73dd81b9036795f73de7ddfe27d5a" dependencies = [ "glob", + "once_cell", "serde", "serde_derive", "serde_json", @@ -10572,18 +10566,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.33" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.33" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 8d31358d9..04b25c7ca 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -16,7 +16,6 @@ workspace = true reth-codecs.workspace = true reth-ethereum-forks.workspace = true reth-network-types.workspace = true -reth-rpc-types.workspace = true revm.workspace = true revm-primitives = { workspace = true, features = ["serde"] } @@ -25,9 +24,10 @@ alloy-chains = { workspace = true, features = ["serde", "rlp"] } 
alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie = { workspace = true, features = ["serde"] } -nybbles = { workspace = true, features = ["serde", "rlp"] } +alloy-rpc-types = { workspace = true, optional = true } alloy-genesis.workspace = true -alloy-eips.workspace = true +alloy-eips = { workspace = true, features = ["serde"] } +nybbles = { workspace = true, features = ["serde", "rlp"] } # crypto secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } @@ -66,7 +66,9 @@ revm-primitives = { workspace = true, features = ["arbitrary"] } nybbles = { workspace = true, features = ["arbitrary"] } alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } + assert_matches.workspace = true +arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-derive.workspace = true rand.workspace = true @@ -91,7 +93,6 @@ default = ["c-kzg", "zstd-codec"] asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ "revm-primitives/arbitrary", - "reth-rpc-types/arbitrary", "reth-ethereum-forks/arbitrary", "nybbles/arbitrary", "alloy-trie/arbitrary", @@ -110,6 +111,7 @@ optimism = [ "reth-ethereum-forks/optimism", "revm/optimism", ] +alloy-compat = ["alloy-rpc-types"] test-utils = ["dep:plain_hasher", "dep:hash-db"] [[bench]] diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 4b4831b9c..8a029dc05 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,16 +1,15 @@ use crate::{ - Address, Bytes, GotExpected, Header, SealedHeader, Signature, TransactionSigned, + Address, Bytes, GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, B256, }; use alloy_rlp::{RlpDecodable, RlpEncodable}; #[cfg(any(test, feature = "arbitrary"))] use proptest::prelude::{any, prop_compose}; use 
reth_codecs::derive_arbitrary; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::ops::Deref; -pub use reth_rpc_types::{ +pub use alloy_eips::eip1898::{ BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, }; @@ -148,33 +147,36 @@ impl Deref for Block { } } -impl TryFrom for Block { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for Block { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(block: alloy_rpc_types::Block) -> Result { + use alloy_rpc_types::ConversionError; - fn try_from(block: reth_rpc_types::Block) -> Result { let body = { let transactions: Result, ConversionError> = match block .transactions { - reth_rpc_types::BlockTransactions::Full(transactions) => transactions + alloy_rpc_types::BlockTransactions::Full(transactions) => transactions .into_iter() .map(|tx| { let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; Ok(TransactionSigned::from_transaction_and_signature( tx.try_into()?, - Signature { + crate::Signature { r: signature.r, s: signature.s, odd_y_parity: signature .y_parity - .unwrap_or(reth_rpc_types::Parity(false)) + .unwrap_or(alloy_rpc_types::Parity(false)) .0, }, )) }) .collect(), - reth_rpc_types::BlockTransactions::Hashes(_) | - reth_rpc_types::BlockTransactions::Uncle => { + alloy_rpc_types::BlockTransactions::Hashes(_) | + alloy_rpc_types::BlockTransactions::Uncle => { return Err(ConversionError::MissingFullTransactions) } }; diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index a06be2625..d0bd5baf8 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -16,7 +16,6 @@ use bytes::BufMut; #[cfg(any(test, feature = "arbitrary"))] use proptest::prelude::*; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::{mem, ops::Deref}; @@ -486,10 
+485,13 @@ impl Decodable for Header { } } -impl TryFrom for Header { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for Header { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(header: alloy_rpc_types::Header) -> Result { + use alloy_rpc_types::ConversionError; - fn try_from(header: reth_rpc_types::Header) -> Result { Ok(Self { base_fee_per_gas: header .base_fee_per_gas diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 2cd71ae20..27c66e69e 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -4,6 +4,7 @@ //! //! ## Feature Flags //! +//! - `alloy-compat`: Adds compatibility conversions for certain alloy types. //! - `arbitrary`: Adds `proptest` and `arbitrary` support for primitive types. //! - `test-utils`: Export utilities for testing @@ -38,7 +39,6 @@ mod prune; mod receipt; /// Helpers for working with revm pub mod revm; -pub mod serde_helper; pub mod stage; pub mod static_file; mod storage; diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 778e2658b..dcb10545f 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -120,15 +120,10 @@ pub fn parse_nodes(nodes: impl IntoIterator>) -> Vec(&node).expect("couldn't serialize"); assert_eq!(ser, "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"") @@ -246,7 +241,7 @@ mod tests { address: IpAddr::V4([10, 3, 58, 6].into()), tcp_port: 30303u16, udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), + id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), }) } } diff --git a/crates/primitives/src/serde_helper.rs 
b/crates/primitives/src/serde_helper.rs deleted file mode 100644 index b0d041fdc..000000000 --- a/crates/primitives/src/serde_helper.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! [serde] utilities. - -pub use reth_rpc_types::serde_helpers::*; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 95407537b..eda139ffd 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -2,7 +2,6 @@ use crate::compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}; use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, TxKind, B256, U256}; -use alloy_eips::eip2718::Eip2718Error; use alloy_rlp::{ Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, }; @@ -11,7 +10,6 @@ use derive_more::{AsRef, Deref}; use once_cell::sync::Lazy; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::mem; @@ -614,10 +612,14 @@ impl From for Transaction { } } -impl TryFrom for Transaction { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for Transaction { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(tx: alloy_rpc_types::Transaction) -> Result { + use alloy_eips::eip2718::Eip2718Error; + use alloy_rpc_types::ConversionError; - fn try_from(tx: reth_rpc_types::Transaction) -> Result { match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| { ConversionError::Eip2718Error(Eip2718Error::UnexpectedType( tx.transaction_type.unwrap(), @@ -1717,10 +1719,12 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { } } -impl TryFrom for TransactionSignedEcRecovered { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for TransactionSignedEcRecovered { + type Error = alloy_rpc_types::ConversionError; - fn 
try_from(tx: reth_rpc_types::Transaction) -> Result { + fn try_from(tx: alloy_rpc_types::Transaction) -> Result { + use alloy_rpc_types::ConversionError; let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; let transaction: Transaction = tx.try_into()?; diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index e47b2816a..e4d1b37c0 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -85,7 +85,7 @@ impl From> for Withdrawals { #[cfg(test)] mod tests { use super::*; - use crate::{serde_helper::u64_via_ruint, Address}; + use crate::Address; use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; @@ -95,15 +95,12 @@ mod tests { #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] struct RethWithdrawal { /// Monotonically increasing identifier issued by consensus layer. - #[serde(with = "u64_via_ruint")] index: u64, /// Index of validator associated with withdrawal. - #[serde(with = "u64_via_ruint", rename = "validatorIndex")] validator_index: u64, /// Target address for withdrawn ether. address: Address, /// Value of the withdrawal in gwei. 
- #[serde(with = "u64_via_ruint")] amount: u64, } diff --git a/crates/rpc/rpc-api/src/eth.rs b/crates/rpc/rpc-api/src/eth.rs index 8811ef87d..b6c2993bb 100644 --- a/crates/rpc/rpc-api/src/eth.rs +++ b/crates/rpc/rpc-api/src/eth.rs @@ -1,11 +1,10 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{ - serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, -}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; use reth_rpc_types::{ - state::StateOverride, AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, - EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, - StateContext, SyncStatus, Transaction, TransactionRequest, Work, + serde_helpers::JsonStorageKey, state::StateOverride, AccessListWithGasUsed, + AnyTransactionReceipt, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, + FeeHistory, Header, Index, RichBlock, StateContext, SyncStatus, Transaction, + TransactionRequest, Work, }; /// Eth rpc interface: diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 7ba1539b8..a1796a71d 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -2,25 +2,22 @@ //! Handles RPC requests for the `eth_` namespace. 
use jsonrpsee::core::RpcResult as Result; -use serde_json::Value; -use tracing::trace; - use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{ - serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, -}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; use reth_provider::{ BlockIdReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProviderFactory, }; use reth_rpc_api::EthApiServer; use reth_rpc_types::{ - state::StateOverride, AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, - EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, - StateContext, SyncStatus, TransactionRequest, Work, + serde_helpers::JsonStorageKey, state::StateOverride, AccessListWithGasUsed, + AnyTransactionReceipt, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, + FeeHistory, Header, Index, RichBlock, StateContext, SyncStatus, TransactionRequest, Work, }; use reth_transaction_pool::TransactionPool; +use serde_json::Value; +use tracing::trace; use crate::{ eth::{ diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index 7f0bdd4e2..144b1504f 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -5,13 +5,11 @@ use crate::{ EthApi, }; use reth_evm::ConfigureEvm; -use reth_primitives::{ - serde_helper::JsonStorageKey, Address, BlockId, BlockNumberOrTag, Bytes, B256, U256, -}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, U256}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; -use reth_rpc_types::EIP1186AccountProofResponse; +use reth_rpc_types::{serde_helpers::JsonStorageKey, EIP1186AccountProofResponse}; use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::{PoolTransaction, 
TransactionPool}; From d46774411fee0802f4390e4f04b7184cdcdb3ea2 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 May 2024 17:28:53 -0400 Subject: [PATCH 241/250] release: v0.2.0-beta.7 (#8164) --- Cargo.lock | 148 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 75 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e99592ed..621a84ed2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2678,7 +2678,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "rayon", @@ -6382,7 +6382,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "ahash", "alloy-rlp", @@ -6459,7 +6459,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -6481,7 +6481,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "futures-core", @@ -6503,7 +6503,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "futures", @@ -6545,7 +6545,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "assert_matches", @@ -6571,7 +6571,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-tasks", "tokio", @@ -6580,7 +6580,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-genesis 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=77c1240)", @@ -6598,7 +6598,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "convert_case 0.6.0", "proc-macro2", @@ -6609,7 +6609,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "confy", "humantime-serde", @@ -6625,7 +6625,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "auto_impl", "reth-primitives", @@ -6634,7 +6634,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "mockall", "reth-consensus", @@ -6645,7 +6645,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "arbitrary", "assert_matches", @@ -6682,7 +6682,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -6706,7 +6706,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "derive_more", @@ -6730,7 +6730,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "data-encoding", @@ -6755,7 +6755,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -6786,7 +6786,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-network", @@ -6816,7 +6816,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" 
dependencies = [ "aes 0.8.4", "alloy-rlp", @@ -6848,7 +6848,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-primitives", "reth-rpc-types", @@ -6858,7 +6858,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "arbitrary", @@ -6893,7 +6893,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "arbitrary", @@ -6916,7 +6916,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-consensus", "reth-consensus-common", @@ -6925,7 +6925,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "reth-engine-primitives", @@ -6940,7 +6940,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-chains", "alloy-primitives", @@ -6955,7 +6955,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-basic-payload-builder", "reth-payload-builder", @@ -6969,7 +6969,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "rayon", "reth-db", @@ -6979,7 +6979,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures-util", "parking_lot 0.12.2", @@ -6991,7 +6991,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-evm", "reth-interfaces", @@ -7003,7 +7003,7 @@ dependencies = [ [[package]] name = "reth-evm-optimism" -version = "0.2.0-beta.6" +version = 
"0.2.0-beta.7" dependencies = [ "reth-evm", "reth-interfaces", @@ -7018,7 +7018,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "eyre", "metrics", @@ -7038,7 +7038,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "auto_impl", "clap", @@ -7058,7 +7058,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "async-trait", "bytes", @@ -7080,7 +7080,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "bitflags 2.5.0", "byteorder", @@ -7102,7 +7102,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "bindgen", "cc", @@ -7111,7 +7111,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "metrics", @@ -7122,7 +7122,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "metrics", "once_cell", @@ -7136,7 +7136,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "pin-project", "reth-network-types", @@ -7145,7 +7145,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "igd-next", "pin-project-lite", @@ -7159,7 +7159,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-node-bindings", "alloy-provider", @@ -7216,7 +7216,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "enr", "reth-discv4", @@ -7231,7 +7231,7 @@ dependencies = [ [[package]] name = 
"reth-network-types" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7246,7 +7246,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "anyhow", "bincode", @@ -7267,7 +7267,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-db", "reth-engine-primitives", @@ -7281,7 +7281,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "confy", @@ -7321,7 +7321,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "clap", @@ -7385,7 +7385,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "eyre", "futures", @@ -7413,7 +7413,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "humantime", @@ -7434,7 +7434,7 @@ dependencies = [ [[package]] name = "reth-node-optimism" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", "async-trait", @@ -7476,7 +7476,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-consensus", "reth-consensus-common", @@ -7485,7 +7485,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", @@ -7507,7 +7507,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures-util", "metrics", @@ -7529,7 +7529,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = 
"0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-primitives", "reth-rpc-types", @@ -7538,7 +7538,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-chains", "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", @@ -7587,7 +7587,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7618,7 +7618,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "derive_more", @@ -7642,7 +7642,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-consensus-common", "reth-interfaces", @@ -7655,7 +7655,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -7710,7 +7710,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "jsonrpsee", "reth-engine-primitives", @@ -7723,7 +7723,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "jsonrpsee", @@ -7737,7 +7737,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "hyper 0.14.28", "jsonrpsee", @@ -7775,7 +7775,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7803,7 +7803,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", @@ 
-7828,7 +7828,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", @@ -7839,7 +7839,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7877,7 +7877,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "assert_matches", @@ -7900,7 +7900,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "parking_lot 0.12.2", @@ -7919,7 +7919,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "dyn-clone", "futures-util", @@ -7935,7 +7935,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "reth-primitives", @@ -7944,7 +7944,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "tokio", "tokio-stream", @@ -7952,7 +7952,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "clap", "eyre", @@ -7966,7 +7966,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "aquamarine", @@ -8005,7 +8005,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "auto_impl", @@ -8031,7 +8031,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies 
= [ "alloy-rlp", "criterion", diff --git a/Cargo.toml b/Cargo.toml index b45009795..fe219f51b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -165,7 +165,7 @@ unnecessary_struct_initialization = "allow" use_self = "allow" [workspace.package] -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" edition = "2021" rust-version = "1.76" license = "MIT OR Apache-2.0" From aa07257d3ba071188b7b7e372856b50be094e8ba Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 May 2024 18:22:35 -0400 Subject: [PATCH 242/250] fix: use op-build instead of build-op in release workflow (#8167) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8562da807..2b546a6fc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -31,7 +31,7 @@ jobs: needs: extract-version strategy: matrix: - build: [{command: build, binary: reth}, {command: build-op, binary: op-reth}] + build: [{command: build, binary: reth}, {command: op-build, binary: op-reth}] include: - target: x86_64-unknown-linux-gnu os: ubuntu-20.04 From 1184e8c45b5676e1516844bba18996cdf1562654 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 9 May 2024 13:04:14 +0100 Subject: [PATCH 243/250] chore: use `NoopBodiesDownloader` & `NoopHeaderDownloader` on `stage unwind` command (#8165) --- bin/reth/src/commands/stage/unwind.rs | 46 +++------------------- crates/net/downloaders/src/bodies/mod.rs | 3 ++ crates/net/downloaders/src/bodies/noop.rs | 29 ++++++++++++++ crates/net/downloaders/src/headers/mod.rs | 3 ++ crates/net/downloaders/src/headers/noop.rs | 30 ++++++++++++++ 5 files changed, 70 insertions(+), 41 deletions(-) create mode 100644 crates/net/downloaders/src/bodies/noop.rs create mode 100644 crates/net/downloaders/src/headers/noop.rs diff --git a/bin/reth/src/commands/stage/unwind.rs 
b/bin/reth/src/commands/stage/unwind.rs index b7998d087..3a6597499 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -5,15 +5,9 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{Config, PruneConfig}; use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; +use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_exex::ExExManagerHandle; -use reth_node_core::{ - args::{get_secret_key, NetworkArgs}, - dirs::ChainPath, -}; +use reth_node_core::args::NetworkArgs; use reth_primitives::{BlockHashOrNumber, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, @@ -110,8 +104,7 @@ impl Command { .filter(|highest_static_file_block| highest_static_file_block >= range.start()) { info!(target: "reth::cli", ?range, ?highest_static_block, "Executing a pipeline unwind."); - let mut pipeline = - self.build_pipeline(data_dir, config, provider_factory.clone()).await?; + let mut pipeline = self.build_pipeline(config, provider_factory.clone()).await?; // Move all applicable data from database to static files. 
pipeline.produce_static_files()?; @@ -142,40 +135,11 @@ impl Command { async fn build_pipeline( self, - data_dir: ChainPath, config: Config, provider_factory: ProviderFactory>, ) -> Result>, eyre::Error> { - // Even though we are not planning to download anything, we need to initialize Body and - // Header stage with a network client - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let p2p_secret_key = get_secret_key(&network_secret_path)?; - let default_peers_path = data_dir.known_peers(); - let network = self - .network - .network_config( - &config, - provider_factory.chain_spec(), - p2p_secret_key, - default_peers_path, - ) - .build(provider_factory.clone()) - .start_network() - .await?; - let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - - // building network downloaders using the fetch client - let fetch_client = network.fetch_client().await?; - let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(fetch_client.clone(), Arc::clone(&consensus)); - let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies).build( - fetch_client, - Arc::clone(&consensus), - provider_factory.clone(), - ); let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); @@ -189,8 +153,8 @@ impl Command { provider_factory.clone(), header_mode, Arc::clone(&consensus), - header_downloader, - body_downloader, + NoopHeaderDownloader::default(), + NoopBodiesDownloader::default(), executor.clone(), stage_conf.etl.clone(), ) diff --git a/crates/net/downloaders/src/bodies/mod.rs b/crates/net/downloaders/src/bodies/mod.rs index f8931ea81..d4f613413 100644 --- a/crates/net/downloaders/src/bodies/mod.rs +++ b/crates/net/downloaders/src/bodies/mod.rs @@ -2,6 +2,9 @@ #[allow(clippy::module_inception)] pub mod bodies; +/// A body downloader that does nothing. Useful to build unwind-only pipelines. 
+pub mod noop; + /// A downloader implementation that spawns a downloader to a task pub mod task; diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs new file mode 100644 index 000000000..5885a17c1 --- /dev/null +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -0,0 +1,29 @@ +use futures::Stream; +use reth_interfaces::p2p::{ + bodies::{downloader::BodyDownloader, response::BlockResponse}, + error::{DownloadError, DownloadResult}, +}; +use reth_primitives::BlockNumber; +use std::ops::RangeInclusive; + +/// A [BodyDownloader] implementation that does nothing. +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopBodiesDownloader; + +impl BodyDownloader for NoopBodiesDownloader { + fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { + Ok(()) + } +} + +impl Stream for NoopBodiesDownloader { + type Item = Result, DownloadError>; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + panic!("NoopBodiesDownloader shouldn't be polled.") + } +} diff --git a/crates/net/downloaders/src/headers/mod.rs b/crates/net/downloaders/src/headers/mod.rs index 4321ef52b..a261f5579 100644 --- a/crates/net/downloaders/src/headers/mod.rs +++ b/crates/net/downloaders/src/headers/mod.rs @@ -1,6 +1,9 @@ /// A Linear downloader implementation. pub mod reverse_headers; +/// A header downloader that does nothing. Useful to build unwind-only pipelines. 
+pub mod noop; + /// A downloader implementation that spawns a downloader to a task pub mod task; diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs new file mode 100644 index 000000000..8127cc232 --- /dev/null +++ b/crates/net/downloaders/src/headers/noop.rs @@ -0,0 +1,30 @@ +use futures::Stream; +use reth_interfaces::p2p::headers::{ + downloader::{HeaderDownloader, SyncTarget}, + error::HeadersDownloaderError, +}; +use reth_primitives::SealedHeader; + +/// A [HeaderDownloader] implementation that does nothing. +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopHeaderDownloader; + +impl HeaderDownloader for NoopHeaderDownloader { + fn update_local_head(&mut self, _: SealedHeader) {} + + fn update_sync_target(&mut self, _: SyncTarget) {} + + fn set_batch_size(&mut self, _: usize) {} +} + +impl Stream for NoopHeaderDownloader { + type Item = Result, HeadersDownloaderError>; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + panic!("NoopHeaderDownloader shouldn't be polled.") + } +} From 539c70256145f0a126fde406ef14d50fbd8f9589 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 May 2024 12:08:32 -0400 Subject: [PATCH 244/250] fix: use configs object list in matrix to release all platforms (#8179) --- .github/workflows/release.yml | 67 ++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 28 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2b546a6fc..9bb9f4d93 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,50 +27,61 @@ jobs: build: name: build release - runs-on: ${{ matrix.os }} + runs-on: ${{ matrix.configs.os }} needs: extract-version strategy: matrix: + configs: [ + { + target: x86_64-unknown-linux-gnu + os: ubuntu-20.04 + profile: maxperf + }, + { + target: aarch64-unknown-linux-gnu + os: 
ubuntu-20.04 + profile: maxperf + }, + { + target: x86_64-apple-darwin + os: macos-13 + profile: maxperf + }, + { + target: aarch64-apple-darwin + os: macos-14 + profile: maxperf + }, + { + target: x86_64-pc-windows-gnu + os: ubuntu-20.04 + profile: maxperf + }, + ] build: [{command: build, binary: reth}, {command: op-build, binary: op-reth}] - include: - - target: x86_64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - - target: aarch64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - - target: x86_64-apple-darwin - os: macos-13 - profile: maxperf - - target: aarch64-apple-darwin - os: macos-14 - profile: maxperf - - target: x86_64-pc-windows-gnu - os: ubuntu-20.04 - profile: maxperf steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: - target: ${{ matrix.target }} + target: ${{ matrix.configs.target }} - uses: taiki-e/install-action@cross - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - name: Apple M1 setup - if: matrix.target == 'aarch64-apple-darwin' + if: matrix.configs.target == 'aarch64-apple-darwin' run: | echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-os-version)" >> $GITHUB_ENV - name: Build Reth - run: make PROFILE=${{ matrix.profile }} ${{ matrix.build.command }}-${{ matrix.target }} + run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - name: Move binary run: | mkdir artifacts - [[ "${{ matrix.target }}" == *windows* ]] && ext=".exe" - mv "target/${{ matrix.target }}/${{ matrix.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts + [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" + mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - name: Configure GPG and create artifacts env: @@ -80,22 +91,22 @@ jobs: export GPG_TTY=$(tty) echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg 
--batch --import cd artifacts - tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz ${{ matrix.build.binary }}* - echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}* + echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz mv *tar.gz* .. shell: bash - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - name: Upload signature uses: actions/upload-artifact@v4 with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc draft-release: name: draft release From 4bbc8509d6af7a412dbb117cae47a37e634c7ede Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 9 May 2024 17:43:26 +0100 Subject: [PATCH 245/250] 
chore(pruner): set default timeout to `None` on `PrunerBuilder` (#8181) --- crates/prune/src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 8a14ccf4a..879bd9fb9 100644 --- a/crates/prune/src/builder.rs +++ b/crates/prune/src/builder.rs @@ -102,7 +102,7 @@ impl Default for PrunerBuilder { segments: PruneModes::none(), max_reorg_depth: 64, prune_delete_limit: MAINNET.prune_delete_limit, - timeout: Some(Self::DEFAULT_TIMEOUT), + timeout: None, finished_exex_height: watch::channel(FinishedExExHeight::NoExExs).1, } } From 87fee5e8be1983f0baf1647437dd2b098e279ace Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 May 2024 13:00:42 -0400 Subject: [PATCH 246/250] fix: use yaml lists instead of objects in release.yml (#8182) --- .github/workflows/release.yml | 49 +++++++++++++++-------------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9bb9f4d93..470b918b2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -31,34 +31,27 @@ jobs: needs: extract-version strategy: matrix: - configs: [ - { - target: x86_64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - }, - { - target: aarch64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - }, - { - target: x86_64-apple-darwin - os: macos-13 - profile: maxperf - }, - { - target: aarch64-apple-darwin - os: macos-14 - profile: maxperf - }, - { - target: x86_64-pc-windows-gnu - os: ubuntu-20.04 - profile: maxperf - }, - ] - build: [{command: build, binary: reth}, {command: op-build, binary: op-reth}] + configs: + - target: x86_64-unknown-linux-gnu + os: ubuntu-20.04 + profile: maxperf + - target: aarch64-unknown-linux-gnu + os: ubuntu-20.04 + profile: maxperf + - target: x86_64-apple-darwin + os: macos-13 + profile: maxperf + - target: aarch64-apple-darwin + os: 
macos-14 + profile: maxperf + - target: x86_64-pc-windows-gnu + os: ubuntu-20.04 + profile: maxperf + build: + - command: build + binary: reth + - command: op-build + binary: op-reth steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable From ee3c93916dfe8cf49d5d9f2be0e838279dd249d0 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 9 May 2024 19:29:42 +0200 Subject: [PATCH 247/250] chore(deps): remove outdated dev dep and comment (#8184) --- crates/primitives/Cargo.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 04b25c7ca..d9d6c592e 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -82,11 +82,8 @@ plain_hasher = "0.2" sucds = "0.8.1" -# necessary so we don't hit a "undeclared 'std'": -# https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 criterion.workspace = true pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } -secp256k1.workspace = true [features] default = ["c-kzg", "zstd-codec"] From ad54af8406886367fec407750b678c421d39be96 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 9 May 2024 19:24:57 +0100 Subject: [PATCH 248/250] fix: copy and prune data from database with `move_to_static_files`, before a pipeline run/unwind (#8127) --- Cargo.lock | 2 +- bin/reth/Cargo.toml | 1 - bin/reth/src/commands/stage/unwind.rs | 12 +--- crates/primitives/src/prune/mode.rs | 1 + crates/prune/src/builder.rs | 3 +- crates/prune/src/error.rs | 13 +++++ crates/stages-api/Cargo.toml | 1 + crates/stages-api/src/pipeline/mod.rs | 56 ++++++++++++------- .../provider/src/providers/database/mod.rs | 19 +++++-- 9 files changed, 70 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 621a84ed2..6d9252332 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6434,7 +6434,6 @@ dependencies = [ 
"reth-payload-validator", "reth-primitives", "reth-provider", - "reth-prune", "reth-revm", "reth-rpc", "reth-rpc-api", @@ -7890,6 +7889,7 @@ dependencies = [ "reth-metrics", "reth-primitives", "reth-provider", + "reth-prune", "reth-static-file", "reth-tokio-util", "thiserror", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 37b26686f..c1ed8981a 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -43,7 +43,6 @@ reth-payload-builder.workspace = true reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-discv4.workspace = true -reth-prune.workspace = true reth-static-file = { workspace = true } reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 3a6597499..1f0c7fc45 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -2,7 +2,7 @@ use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; -use reth_config::{Config, PruneConfig}; +use reth_config::Config; use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -13,7 +13,6 @@ use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, StaticFileProviderFactory, }; -use reth_prune::PrunerBuilder; use reth_stages::{ sets::DefaultStages, stages::{ @@ -107,14 +106,7 @@ impl Command { let mut pipeline = self.build_pipeline(config, provider_factory.clone()).await?; // Move all applicable data from database to static files. - pipeline.produce_static_files()?; - - // Run the pruner so we don't potentially end up with higher height in the database vs - // static files. 
- let mut pruner = PrunerBuilder::new(PruneConfig::default()) - .prune_delete_limit(usize::MAX) - .build(provider_factory); - pruner.run(*range.end())?; + pipeline.move_to_static_files()?; pipeline.unwind((*range.start()).saturating_sub(1), None)?; } else { diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs index c32f66d35..3454573b9 100644 --- a/crates/primitives/src/prune/mode.rs +++ b/crates/primitives/src/prune/mode.rs @@ -36,6 +36,7 @@ impl PruneMode { PruneMode::Distance(distance) if *distance >= segment.min_blocks(purpose) => { Some((tip - distance, *self)) } + PruneMode::Before(n) if *n == tip + 1 && purpose.is_static_file() => Some((tip, *self)), PruneMode::Before(n) if *n > tip => None, // Nothing to prune yet PruneMode::Before(n) if tip - n >= segment.min_blocks(purpose) => Some((n - 1, *self)), _ => return Err(PruneSegmentError::Configuration(segment)), diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 879bd9fb9..4e0ffd21a 100644 --- a/crates/prune/src/builder.rs +++ b/crates/prune/src/builder.rs @@ -1,10 +1,9 @@ -use std::time::Duration; - use crate::{segments::SegmentSet, Pruner}; use reth_config::PruneConfig; use reth_db::database::Database; use reth_primitives::{FinishedExExHeight, PruneModes, MAINNET}; use reth_provider::ProviderFactory; +use std::time::Duration; use tokio::sync::watch; /// Contains the information required to build a pruner diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs index e12320bc8..bdf5bacc1 100644 --- a/crates/prune/src/error.rs +++ b/crates/prune/src/error.rs @@ -21,3 +21,16 @@ pub enum PrunerError { #[error(transparent)] Provider(#[from] ProviderError), } + +impl From for RethError { + fn from(err: PrunerError) -> Self { + match err { + PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { + RethError::Custom(err.to_string()) + } + PrunerError::Interface(err) => err, + PrunerError::Database(err) => RethError::Database(err), + 
PrunerError::Provider(err) => RethError::Provider(err), + } + } +} diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index d1e31ba78..2101961fd 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -19,6 +19,7 @@ reth-interfaces.workspace = true reth-static-file.workspace = true reth-tokio-util.workspace = true reth-consensus.workspace = true +reth-prune.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 199cc41e6..5aceb515b 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -15,6 +15,7 @@ use reth_provider::{ providers::StaticFileWriter, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, }; +use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use reth_tokio_util::EventListeners; use std::pin::Pin; @@ -140,7 +141,7 @@ where match target { PipelineTarget::Sync(tip) => self.set_tip(tip), PipelineTarget::Unwind(target) => { - if let Err(err) = self.produce_static_files() { + if let Err(err) = self.move_to_static_files() { return (self, Err(err.into())) } if let Err(err) = self.unwind(target, None) { @@ -199,7 +200,7 @@ where /// pipeline (for example the `Finish` stage). Or [ControlFlow::Unwind] of the stage that caused /// the unwind. 
pub async fn run_loop(&mut self) -> Result { - self.produce_static_files()?; + self.move_to_static_files()?; let mut previous_stage = None; for stage_index in 0..self.stages.len() { @@ -236,9 +237,10 @@ where Ok(self.progress.next_ctrl()) } - /// Run [static file producer](StaticFileProducer) and move all data from the database to static - /// files for corresponding [segments](reth_primitives::static_file::StaticFileSegment), - /// according to their [stage checkpoints](StageCheckpoint): + /// Run [static file producer](StaticFileProducer) and [pruner](reth_prune::Pruner) to **move** + /// all data from the database to static files for corresponding + /// [segments](reth_primitives::static_file::StaticFileSegment), according to their [stage + /// checkpoints](StageCheckpoint): /// - [StaticFileSegment::Headers](reth_primitives::static_file::StaticFileSegment::Headers) -> /// [StageId::Headers] /// - [StaticFileSegment::Receipts](reth_primitives::static_file::StaticFileSegment::Receipts) @@ -248,22 +250,38 @@ where /// /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. - pub fn produce_static_files(&self) -> RethResult<()> { + pub fn move_to_static_files(&self) -> RethResult<()> { let mut static_file_producer = self.static_file_producer.lock(); - let provider = self.provider_factory.provider()?; - let targets = static_file_producer.get_static_file_targets(HighestStaticFiles { - headers: provider - .get_stage_checkpoint(StageId::Headers)? - .map(|checkpoint| checkpoint.block_number), - receipts: provider - .get_stage_checkpoint(StageId::Execution)? - .map(|checkpoint| checkpoint.block_number), - transactions: provider - .get_stage_checkpoint(StageId::Bodies)? 
- .map(|checkpoint| checkpoint.block_number), - })?; - static_file_producer.run(targets)?; + // Copies data from database to static files + let lowest_static_file_height = { + let provider = self.provider_factory.provider()?; + let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] + .into_iter() + .map(|stage| { + provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number)) + }) + .collect::, _>>()?; + + let targets = static_file_producer.get_static_file_targets(HighestStaticFiles { + headers: stages_checkpoints[0], + receipts: stages_checkpoints[1], + transactions: stages_checkpoints[2], + })?; + static_file_producer.run(targets)?; + stages_checkpoints.into_iter().min().expect("exists") + }; + + // Deletes data which has been copied to static files. + if let Some(prune_tip) = lowest_static_file_height { + // Run the pruner so we don't potentially end up with higher height in the database vs + // static files during a pipeline unwind + let mut pruner = PrunerBuilder::new(Default::default()) + .prune_delete_limit(usize::MAX) + .build(self.provider_factory.clone()); + + pruner.run(prune_tip)?; + } Ok(()) } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 1e2f73cbc..c84e9d8ce 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -34,10 +34,10 @@ use reth_db::mdbx::DatabaseArguments; /// A common provider that fetches data from a database or static file. /// /// This provider implements most provider or provider factory traits. 
-#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ProviderFactory { /// Database - db: DB, + db: Arc, /// Chain spec chain_spec: Arc, /// Static File Provider @@ -52,7 +52,7 @@ impl ProviderFactory { static_files_path: PathBuf, ) -> RethResult> { Ok(Self { - db, + db: Arc::new(db), chain_spec, static_file_provider: StaticFileProvider::new(static_files_path)?, }) @@ -71,7 +71,7 @@ impl ProviderFactory { #[cfg(any(test, feature = "test-utils"))] /// Consumes Self and returns DB - pub fn into_db(self) -> DB { + pub fn into_db(self) -> Arc { self.db } } @@ -86,7 +86,7 @@ impl ProviderFactory { static_files_path: PathBuf, ) -> RethResult { Ok(ProviderFactory:: { - db: init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?, + db: Arc::new(init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?), chain_spec, static_file_provider: StaticFileProvider::new(static_files_path)?, }) @@ -558,6 +558,15 @@ impl PruneCheckpointReader for ProviderFactory { } } +impl Clone for ProviderFactory { + fn clone(&self) -> Self { + ProviderFactory { + db: Arc::clone(&self.db), + chain_spec: self.chain_spec.clone(), + static_file_provider: self.static_file_provider.clone(), + } + } +} #[cfg(test)] mod tests { use super::ProviderFactory; From e6fe864e70727ff376f490f1b56a01c25ef7ea5c Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 May 2024 14:26:25 -0400 Subject: [PATCH 249/250] fix: use --show-sdk-platform-version instead of os-version (#8185) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 470b918b2..91f65d2bc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -66,7 +66,7 @@ jobs: if: matrix.configs.target == 'aarch64-apple-darwin' run: | echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV - echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx 
--show-sdk-os-version)" >> $GITHUB_ENV + echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - name: Build Reth run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} From a44e0857373cee447c3823608194ccac53509140 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 May 2024 16:40:42 -0400 Subject: [PATCH 250/250] fix: do not use cross for x86_64 apple darwin (#8189) --- Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Makefile b/Makefile index f19a3cd8c..ada2149b8 100644 --- a/Makefile +++ b/Makefile @@ -95,6 +95,7 @@ op-build-aarch64-unknown-linux-gnu: export JEMALLOC_SYS_WITH_LG_PAGE=16 # No jemalloc on Windows build-x86_64-pc-windows-gnu: FEATURES := $(filter-out jemalloc jemalloc-prof,$(FEATURES)) +op-build-x86_64-pc-windows-gnu: FEATURES := $(filter-out jemalloc jemalloc-prof,$(FEATURES)) # Note: The additional rustc compiler flags are for intrinsics needed by MDBX. # See: https://github.com/cross-rs/cross/wiki/FAQ#undefined-reference-with-build-std @@ -116,6 +117,10 @@ build-x86_64-apple-darwin: $(MAKE) build-native-x86_64-apple-darwin build-aarch64-apple-darwin: $(MAKE) build-native-aarch64-apple-darwin +op-build-x86_64-apple-darwin: + $(MAKE) op-build-native-x86_64-apple-darwin +op-build-aarch64-apple-darwin: + $(MAKE) op-build-native-aarch64-apple-darwin # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary