diff --git a/bin/rundler/src/cli/builder.rs b/bin/rundler/src/cli/builder.rs index 2a5d73c0d..ecb1952e6 100644 --- a/bin/rundler/src/cli/builder.rs +++ b/bin/rundler/src/cli/builder.rs @@ -13,12 +13,12 @@ use std::{net::SocketAddr, time::Duration}; -use anyhow::Context; +use anyhow::{bail, Context}; use clap::Args; use rundler_builder::{ self, BloxrouteSenderArgs, BuilderEvent, BuilderEventKind, BuilderTask, BuilderTaskArgs, - EntryPointBuilderSettings, FlashbotsSenderArgs, LocalBuilderBuilder, TransactionSenderArgs, - TransactionSenderKind, + EntryPointBuilderSettings, FlashbotsSenderArgs, LocalBuilderBuilder, RawSenderArgs, + TransactionSenderArgs, TransactionSenderKind, }; use rundler_pool::RemotePoolClient; use rundler_sim::{MempoolConfigs, PriorityFeeMode}; @@ -57,6 +57,10 @@ pub struct BuilderArgs { host: String, /// Private key to use for signing transactions + /// DEPRECATED: Use `builder.private_keys` instead + /// + /// If both `builder.private_key` and `builder.private_keys` are set, `builder.private_key` is appended + /// to `builder.private_keys`. Keys must be unique. #[arg( long = "builder.private_key", name = "builder.private_key", @@ -64,6 +68,17 @@ pub struct BuilderArgs { )] private_key: Option, + /// Private keys to use for signing transactions + /// + /// Cannot use both `builder.private_key` and `builder.aws_kms_key_ids` at the same time. + #[arg( + long = "builder.private_keys", + name = "builder.private_keys", + env = "BUILDER_PRIVATE_KEYS", + value_delimiter = ',' + )] + private_keys: Vec, + /// AWS KMS key IDs to use for signing transactions #[arg( long = "builder.aws_kms_key_ids", @@ -115,7 +130,7 @@ pub struct BuilderArgs { /// If present, the url of the ETH provider that will be used to send /// transactions. Defaults to the value of `node_http`. /// - /// Only used when BUILDER_SENDER is "raw" or "conditional" + /// Only used when BUILDER_SENDER is "raw" #[arg( long = "builder.submit_url", name = "builder.submit_url", @@ -123,6 +138,40 @@ pub struct BuilderArgs { )] pub submit_url: Option, + /// If present, the url of the ETH provider that will be used to check + /// transaction status. Else will use the node http for status. + /// + /// Only used when BUILDER_SENDER is "raw" + #[arg( + long = "builder.use_submit_for_status", + name = "builder.use_submit_for_status", + env = "BUILDER_USE_SUBMIT_FOR_STATUS", + default_value = "false" + )] + pub use_submit_for_status: bool, + + /// Use the conditional RPC endpoint for transaction submission. + /// + /// Only used when BUILDER_SENDER is "raw" + #[arg( + long = "builder.use_conditional_rpc", + name = "builder.use_conditional_rpc", + env = "BUILDER_USE_CONDITIONAL_RPC", + default_value = "false" + )] + pub use_conditional_rpc: bool, + + /// If the "dropped" status is unsupported by the status provider. + /// + /// Only used when BUILDER_SENDER is "raw" + #[arg( + long = "builder.dropped_status_unsupported", + name = "builder.dropped_status_unsupported", + env = "BUILDER_DROPPED_STATUS_UNSUPPORTED", + default_value = "false" + )] + pub dropped_status_unsupported: bool, + /// A list of builders to pass into the Flashbots Relay RPC. 
/// /// Only used when BUILDER_SENDER is "flashbots" @@ -216,7 +265,6 @@ impl BuilderArgs { .node_http .clone() .context("should have a node HTTP URL")?; - let submit_url = self.submit_url.clone().unwrap_or_else(|| rpc_url.clone()); let mempool_configs = match &common.mempool_config_path { Some(path) => get_json_config::(path, &common.aws_region) @@ -251,27 +299,41 @@ impl BuilderArgs { num_builders += common.num_builders_v0_7; } - if self.private_key.is_some() { - if num_builders > 1 { - return Err(anyhow::anyhow!( - "Cannot use a private key with multiple builders. You may need to disable one of the entry points." - )); + if (self.private_key.is_some() || !self.private_keys.is_empty()) + && !self.aws_kms_key_ids.is_empty() + { + bail!( + "Cannot use both builder.private_key(s) and builder.aws_kms_key_ids at the same time." + ); + } + + let mut private_keys = self.private_keys.clone(); + if self.private_key.is_some() || !self.private_keys.is_empty() { + if let Some(pk) = &self.private_key { + private_keys.push(pk.clone()); + } + + if num_builders > private_keys.len() as u64 { + bail!( + "Found {} private keys, but need {} keys for the number of builders. You may need to disable one of the entry points.", + private_keys.len(), num_builders + ); } } else if self.aws_kms_key_ids.len() < num_builders as usize { - return Err(anyhow::anyhow!( + bail!( "Not enough AWS KMS key IDs for the number of builders. Need {} keys, found {}. You may need to disable one of the entry points.", num_builders, self.aws_kms_key_ids.len() - )); + ); } - let sender_args = self.sender_args(&chain_spec)?; + let sender_args = self.sender_args(&chain_spec, &rpc_url)?; Ok(BuilderTaskArgs { entry_points, chain_spec, unsafe_mode: common.unsafe_mode, rpc_url, - private_key: self.private_key.clone(), + private_keys, aws_kms_key_ids: self.aws_kms_key_ids.clone(), aws_kms_region: common .aws_region @@ -281,7 +343,6 @@ impl BuilderArgs { redis_lock_ttl_millis: self.redis_lock_ttl_millis, max_bundle_size: self.max_bundle_size, max_bundle_gas: common.max_bundle_gas, - submit_url, bundle_priority_fee_overhead_percent: common.bundle_priority_fee_overhead_percent, priority_fee_mode, sender_args, @@ -294,10 +355,18 @@ impl BuilderArgs { }) } - fn sender_args(&self, chain_spec: &ChainSpec) -> anyhow::Result { + fn sender_args( + &self, + chain_spec: &ChainSpec, + rpc_url: &str, + ) -> anyhow::Result { match self.sender_type { - TransactionSenderKind::Raw => Ok(TransactionSenderArgs::Raw), - TransactionSenderKind::Conditional => Ok(TransactionSenderArgs::Conditional), + TransactionSenderKind::Raw => Ok(TransactionSenderArgs::Raw(RawSenderArgs { + submit_url: self.submit_url.clone().unwrap_or_else(|| rpc_url.into()), + use_submit_for_status: self.use_submit_for_status, + dropped_status_supported: !self.dropped_status_unsupported, + use_conditional_rpc: self.use_conditional_rpc, + })), TransactionSenderKind::Flashbots => { if !chain_spec.flashbots_enabled { return Err(anyhow::anyhow!("Flashbots sender is not enabled for chain")); diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 01b554fde..17999cfcc 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -46,7 +46,7 @@ use rundler_utils::{emit::WithEntryPoint, math}; use tokio::{sync::broadcast, try_join}; use tracing::{error, info, warn}; -use crate::emit::{BuilderEvent, OpRejectionReason, SkipReason}; +use crate::emit::{BuilderEvent, ConditionNotMetReason, OpRejectionReason, SkipReason}; /// 
Extra buffer percent to add on the bundle transaction gas estimate to be sure it will be enough const BUNDLE_TRANSACTION_GAS_OVERHEAD_PERCENT: u64 = 5; @@ -91,16 +91,29 @@ impl Bundle { } } -#[cfg_attr(test, automock(type UO = rundler_types::v0_6::UserOperation;))] #[async_trait] +#[cfg_attr(test, automock(type UO = rundler_types::v0_6::UserOperation;))] pub(crate) trait BundleProposer: Send + Sync + 'static { type UO: UserOperation; + /// Constructs the next bundle + /// + /// If `min_fees` is `Some`, the proposer will ensure the bundle has + /// at least `min_fees`. async fn make_bundle( - &self, - required_fees: Option, + &mut self, + min_fees: Option, is_replacement: bool, ) -> anyhow::Result>; + + /// Gets the current gas fees + /// + /// If `min_fees` is `Some`, the proposer will ensure the gas fees returned are at least `min_fees`. + async fn estimate_gas_fees(&self, min_fees: Option) + -> anyhow::Result<(GasFees, U256)>; + + /// Notifies the proposer that a condition was not met during the last bundle proposal + fn notify_condition_not_met(&mut self); } #[derive(Debug)] @@ -113,6 +126,7 @@ pub(crate) struct BundleProposerImpl { settings: Settings, fee_estimator: FeeEstimator
, event_sender: broadcast::Sender>, + condition_not_met_notified: bool, _uo_type: PhantomData, } @@ -138,9 +152,20 @@ where { type UO = UO; - async fn make_bundle( + async fn estimate_gas_fees( &self, required_fees: Option, + ) -> anyhow::Result<(GasFees, U256)> { + self.fee_estimator.required_bundle_fees(required_fees).await + } + + fn notify_condition_not_met(&mut self) { + self.condition_not_met_notified = true; + } + + async fn make_bundle( + &mut self, + required_fees: Option, is_replacement: bool, ) -> anyhow::Result> { let (ops, (block_hash, _), (bundle_fees, base_fee)) = try_join!( @@ -148,8 +173,11 @@ where self.provider .get_latest_block_hash_and_number() .map_err(anyhow::Error::from), - self.fee_estimator.required_bundle_fees(required_fees) + self.estimate_gas_fees(required_fees) )?; + if ops.is_empty() { + return Ok(Bundle::default()); + } tracing::debug!("Starting bundle proposal with {} ops", ops.len()); @@ -177,6 +205,9 @@ where .collect::>(); tracing::debug!("Bundle proposal after fee limit had {} ops", ops.len()); + if ops.is_empty() { + return Ok(Bundle::default()); + } // (2) Limit the amount of operations for simulation let (ops, gas_limit) = self.limit_user_operations_for_simulation(ops); @@ -215,6 +246,16 @@ where gas_estimate ); + // If recently notified that a bundle condition was not met, check each of + // the conditions again to ensure if they are met, rejecting OPs if they are not. + if self.condition_not_met_notified { + self.condition_not_met_notified = false; + self.check_conditions_met(&mut context).await?; + if context.is_empty() { + break; + } + } + let mut expected_storage = ExpectedStorage::default(); for op in context.iter_ops_with_simulations() { expected_storage.merge(&op.simulation.expected_storage)?; @@ -272,6 +313,7 @@ where ), settings, event_sender, + condition_not_met_notified: false, _uo_type: PhantomData, } } @@ -526,6 +568,73 @@ where context } + async fn check_conditions_met(&self, context: &mut ProposalContext) -> anyhow::Result<()> { + let futs = context + .iter_ops_with_simulations() + .enumerate() + .map(|(i, op)| async move { + self.check_op_conditions_met(&op.simulation.expected_storage) + .await + .map(|reason| (i, reason)) + }) + .collect::>(); + + let to_reject = future::join_all(futs).await.into_iter().flatten(); + + for (index, reason) in to_reject { + self.emit(BuilderEvent::rejected_op( + self.builder_index, + self.op_hash(&context.get_op_at(index)?.op), + OpRejectionReason::ConditionNotMet(reason), + )); + self.reject_index(context, index).await; + } + + Ok(()) + } + + async fn check_op_conditions_met( + &self, + expected_storage: &ExpectedStorage, + ) -> Option { + let futs = expected_storage + .0 + .iter() + .map(|(address, slots)| async move { + let storage = match self + .provider + .batch_get_storage_at(*address, slots.keys().copied().collect()) + .await + { + Ok(storage) => storage, + Err(e) => { + error!("Error getting storage for address {address:?} failing open: {e:?}"); + return None; + } + }; + + for ((slot, expected), actual) in slots.iter().zip(storage) { + if *expected != actual { + return Some(ConditionNotMetReason { + address: *address, + slot: *slot, + expected: *expected, + actual, + }); + } + } + None + }); + + let results = future::join_all(futs).await; + for result in results { + if result.is_some() { + return result; + } + } + None + } + async fn reject_index(&self, context: &mut ProposalContext, i: usize) { let changed_aggregator = context.reject_index(i); self.compute_aggregator_signatures(context, 
&changed_aggregator) @@ -1470,6 +1579,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1504,6 +1615,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1546,6 +1659,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1637,6 +1752,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; // Ops should be grouped by aggregator. Further, the `signature` field @@ -1725,6 +1842,8 @@ mod tests { vec![deposit, deposit, deposit], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -1786,6 +1905,8 @@ mod tests { vec![deposit, deposit, deposit], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -1915,6 +2036,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -1947,6 +2070,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -2013,6 +2138,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -2029,6 +2156,76 @@ mod tests { ); } + #[tokio::test] + async fn test_condition_not_met_match() { + let op = default_op(); + + let mut expected_storage = ExpectedStorage::default(); + expected_storage.insert(address(1), U256::zero(), U256::zero()); + let actual_storage = expected_storage.clone(); + + let bundle = mock_make_bundle( + vec![MockOp { + op: op.clone(), + simulation_result: Box::new(move || { + Ok(SimulationResult { + expected_storage: expected_storage.clone(), + ..Default::default() + }) + }), + }], + vec![], + vec![HandleOpsOut::Success], + vec![], + U256::zero(), + U256::zero(), + true, + actual_storage, + ) + .await; + + assert_eq!( + bundle.ops_per_aggregator, + vec![UserOpsPerAggregator { + user_ops: vec![op], + ..Default::default() + }] + ); + } + + #[tokio::test] + async fn test_condition_not_met_mismatch() { + let op = default_op(); + + let mut expected_storage = ExpectedStorage::default(); + expected_storage.insert(address(1), U256::zero(), U256::zero()); + let mut actual_storage = ExpectedStorage::default(); + actual_storage.insert(address(1), U256::zero(), U256::from(1)); + + let bundle = mock_make_bundle( + vec![MockOp { + op: op.clone(), + simulation_result: Box::new(move || { + Ok(SimulationResult { + expected_storage: expected_storage.clone(), + ..Default::default() + }) + }), + }], + vec![], + vec![HandleOpsOut::Success], + vec![], + U256::zero(), + U256::zero(), + true, + actual_storage, + ) + .await; + + assert!(bundle.ops_per_aggregator.is_empty()); + assert_eq!(bundle.rejected_ops, vec![op]); + } + struct MockOp { op: UserOperation, simulation_result: Box Result + Send + Sync>, @@ -2047,10 +2244,13 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await } + #[allow(clippy::too_many_arguments)] async fn mock_make_bundle( mock_ops: Vec, mock_aggregators: Vec, @@ -2058,6 +2258,8 @@ mod tests { mock_paymaster_deposits: Vec, base_fee: U256, max_priority_fee_per_gas: U256, + notify_condition_not_met: bool, + actual_storage: ExpectedStorage, ) -> Bundle { let entry_point_address = address(123); let beneficiary = address(124); @@ -2117,6 +2319,7 @@ mod tests { .into_iter() .map(|agg| (agg.address, agg.signature)) .collect(); + let mut provider = MockProvider::new(); 
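// Illustrative sketch (not part of this diff): the `check_op_conditions_met`
// logic added in bundle_proposer.rs above compares every expected storage slot
// against the value currently on chain and reports the first mismatch. It is
// modeled here with simplified, hypothetical types; the real code walks
// `ExpectedStorage` and batches the reads through the provider's
// `batch_get_storage_at`.
use std::collections::HashMap;

// Hypothetical stand-ins for ethers `Address`, slot key, and storage word.
type Addr = u64;
type Slot = u64;
type Word = u64;

#[derive(Debug, PartialEq)]
struct SlotMismatch {
    address: Addr,
    slot: Slot,
    expected: Word,
    actual: Word,
}

fn first_storage_mismatch(
    expected: &HashMap<Addr, HashMap<Slot, Word>>,
    read_slot: impl Fn(Addr, Slot) -> Word,
) -> Option<SlotMismatch> {
    for (&address, slots) in expected {
        for (&slot, &want) in slots {
            let got = read_slot(address, slot);
            if got != want {
                // A single mismatch is enough to reject the op and rebuild.
                return Some(SlotMismatch { address, slot, expected: want, actual: got });
            }
        }
    }
    None
}
// The mocked provider expectations that follow exercise this same comparison
// in the tests via `batch_get_storage_at`.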
provider .expect_get_latest_block_hash_and_number() @@ -2127,11 +2330,21 @@ mod tests { provider .expect_get_max_priority_fee() .returning(move || Ok(max_priority_fee_per_gas)); + if notify_condition_not_met { + for (addr, slots) in actual_storage.0.into_iter() { + let values = slots.values().cloned().collect::>(); + provider + .expect_batch_get_storage_at() + .withf(move |a, s| *a == addr && s.iter().all(|slot| slots.contains_key(slot))) + .returning(move |_, _| Ok(values.clone())); + } + } + entry_point .expect_aggregate_signatures() .returning(move |address, _| Ok(signatures_by_aggregator[&address]().unwrap())); let (event_sender, _) = broadcast::channel(16); - let proposer = BundleProposerImpl::new( + let mut proposer = BundleProposerImpl::new( 0, pool_client, simulator, @@ -2147,6 +2360,11 @@ mod tests { }, event_sender, ); + + if notify_condition_not_met { + proposer.notify_condition_not_met(); + } + proposer .make_bundle(None, false) .await diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index 892c17419..4c088dbb0 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -17,22 +17,27 @@ use anyhow::{bail, Context}; use async_trait::async_trait; use ethers::types::{transaction::eip2718::TypedTransaction, Address, H256, U256}; use futures_util::StreamExt; +#[cfg(test)] +use mockall::automock; use rundler_provider::{BundleHandler, EntryPoint}; use rundler_sim::ExpectedStorage; use rundler_types::{ - builder::BundlingMode, chain::ChainSpec, pool::Pool, EntityUpdate, GasFees, UserOperation, + builder::BundlingMode, + chain::ChainSpec, + pool::{NewHead, Pool}, + EntityUpdate, GasFees, UserOperation, }; use rundler_utils::emit::WithEntryPoint; use tokio::{ join, - sync::{broadcast, mpsc, oneshot}, + sync::{broadcast, mpsc, mpsc::UnboundedReceiver, oneshot}, }; -use tracing::{debug, error, info, instrument, trace, warn}; +use tracing::{debug, error, info, instrument, warn}; use crate::{ bundle_proposer::BundleProposer, emit::{BuilderEvent, BundleTxDetails}, - transaction_tracker::{SendResult, TrackerUpdate, TransactionTracker}, + transaction_tracker::{TrackerUpdate, TransactionTracker, TransactionTrackerError}, }; #[async_trait] @@ -42,22 +47,23 @@ pub(crate) trait BundleSender: Send + Sync + 'static { #[derive(Debug)] pub(crate) struct Settings { - pub(crate) replacement_fee_percent_increase: u64, pub(crate) max_fee_increases: u64, + pub(crate) max_blocks_to_wait_for_mine: u64, } #[derive(Debug)] pub(crate) struct BundleSenderImpl { builder_index: u64, - bundle_action_receiver: mpsc::Receiver, + bundle_action_receiver: Option>, chain_spec: ChainSpec, beneficiary: Address, proposer: P, entry_point: E, - transaction_tracker: T, + transaction_tracker: Option, pool: C, settings: Settings, event_sender: broadcast::Sender>, + metrics: BuilderMetrics, _uo_type: PhantomData, } @@ -77,6 +83,9 @@ pub struct SendBundleRequest { pub responder: oneshot::Sender, } +/// Response to a `SendBundleRequest` after +/// going through a full cycle of bundling, sending, +/// and waiting for the transaction to be mined. #[derive(Debug)] pub enum SendBundleResult { Success { @@ -85,14 +94,24 @@ pub enum SendBundleResult { tx_hash: H256, }, NoOperationsInitially, - NoOperationsAfterFeeIncreases { - initial_op_count: usize, - attempt_number: u64, - }, StalledAtMaxFeeIncreases, Error(anyhow::Error), } +// Internal result of attempting to send a bundle. 
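// Illustrative sketch (not part of this diff): the sender below is reorganized
// as a state machine. Each outcome of a send attempt (the
// `SendBundleAttemptResult` enum that follows) selects the next state, roughly
// as modeled here with simplified, hypothetical types; the real transitions
// live in `handle_building_state`.
enum SketchState {
    Building,
    // Wait until this block for the bundle to mine before increasing fees.
    Pending { until_block: u64 },
    // Replacement was underpriced: cancel the pending transaction first.
    Cancelling,
}

enum SketchAttempt {
    Success,
    NoOperations,
    ReplacementUnderpriced,
    ConditionNotMet,
    NonceTooLow,
}

fn next_state(
    attempt: SketchAttempt,
    block: u64,
    max_blocks_to_wait_for_mine: u64,
) -> SketchState {
    match attempt {
        // A sent bundle waits a bounded number of blocks to mine.
        SketchAttempt::Success => SketchState::Pending {
            until_block: block + max_blocks_to_wait_for_mine,
        },
        // An underpriced replacement enters the cancellation loop.
        SketchAttempt::ReplacementUnderpriced => SketchState::Cancelling,
        // Empty bundles, stale nonces, and unmet conditions (after notifying
        // the proposer) all start a fresh building attempt.
        SketchAttempt::NoOperations
        | SketchAttempt::NonceTooLow
        | SketchAttempt::ConditionNotMet => SketchState::Building,
    }
}
// The diff's `SendBundleAttemptResult` just below captures these send outcomes: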
+enum SendBundleAttemptResult { + // The bundle was successfully sent + Success, + // The bundle was empty + NoOperations, + // Replacement Underpriced + ReplacementUnderpriced, + // Condition not met + ConditionNotMet, + // Nonce too low + NonceTooLow, +} + #[async_trait] impl BundleSender for BundleSenderImpl where @@ -107,139 +126,24 @@ where /// next one. #[instrument(skip_all, fields(entry_point = self.entry_point.address().to_string(), builder_index = self.builder_index))] async fn send_bundles_in_loop(mut self) -> anyhow::Result<()> { - let Ok(mut new_heads) = self.pool.subscribe_new_heads().await else { - error!("Failed to subscribe to new blocks"); - bail!("failed to subscribe to new blocks"); - }; + // trigger for sending bundles + let sender_trigger = BundleSenderTrigger::new( + &self.pool, + self.bundle_action_receiver.take().unwrap(), + Duration::from_millis(self.chain_spec.bundle_max_send_interval_millis), + ) + .await?; - // The new_heads stream can buffer up multiple blocks, but we only want to consume the latest one. - // This task is used to consume the new heads and place them onto a channel that can be synchronously - // consumed until the latest block is reached. - let (tx, mut rx) = mpsc::unbounded_channel(); - tokio::spawn(async move { - loop { - match new_heads.next().await { - Some(b) => { - if tx.send(b).is_err() { - error!("Failed to buffer new block for bundle sender"); - return; - } - } - None => { - error!("Block stream ended"); - return; - } - } - } - }); + // initial state + let mut state = + SenderMachineState::new(sender_trigger, self.transaction_tracker.take().unwrap()); - let mut bundling_mode = BundlingMode::Auto; - let mut timer = tokio::time::interval(Duration::from_millis( - self.chain_spec.bundle_max_send_interval_millis, - )); loop { - let mut send_bundle_response: Option> = None; - let mut last_block = None; - - // 3 triggers for loop logic: - // 1 - new block - // - If auto mode, send next bundle - // 2 - timer tick - // - If auto mode, send next bundle - // 3 - action recv - // - If change mode, change and restart loop - // - If send bundle and manual mode, send next bundle - last_block = tokio::select! { - b = rx.recv() => { - match bundling_mode { - BundlingMode::Manual => continue, - BundlingMode::Auto => b - } - }, - _ = timer.tick() => { - match bundling_mode { - BundlingMode::Manual => continue, - BundlingMode::Auto => Some(last_block.unwrap_or_default()) - } - }, - a = self.bundle_action_receiver.recv() => { - match a { - Some(BundleSenderAction::ChangeMode(mode)) => { - debug!("chainging bundling mode to {mode:?}"); - bundling_mode = mode; - continue; - }, - Some(BundleSenderAction::SendBundle(r)) => { - match bundling_mode { - BundlingMode::Manual => { - send_bundle_response = Some(r.responder); - Some(last_block.unwrap_or_default()) - }, - BundlingMode::Auto => { - error!("Received bundle send action while in auto mode, ignoring"); - continue; - } - } - }, - None => { - error!("Bundle action recv closed"); - bail!("Bundle action recv closed"); - } - } - } - }; - - // Consume any other blocks that may have been buffered up - loop { - match rx.try_recv() { - Ok(b) => { - last_block = Some(b); - } - Err(mpsc::error::TryRecvError::Empty) => { - break; - } - Err(mpsc::error::TryRecvError::Disconnected) => { - error!("Block stream closed"); - bail!("Block stream closed"); - } - } - } - - // Wait for new block. Block number doesn't matter as the pool will only notify of new blocks - // after the pool has updated its state. 
The bundle will be formed using the latest pool state - // and can land in the next block - self.check_for_and_log_transaction_update().await; - let result = self.send_bundle_with_increasing_gas_fees().await; - match &result { - SendBundleResult::Success { - block_number, - attempt_number, - tx_hash, - } => - if *attempt_number == 0 { - info!("Bundle with hash {tx_hash:?} landed in block {block_number}"); - } else { - info!("Bundle with hash {tx_hash:?} landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); - } - SendBundleResult::NoOperationsInitially => trace!("No ops to send at block {}", last_block.unwrap_or_default().block_number), - SendBundleResult::NoOperationsAfterFeeIncreases { - initial_op_count, - attempt_number, - } => info!("Bundle initially had {initial_op_count} operations, but after increasing gas fees {attempt_number} time(s) it was empty"), - SendBundleResult::StalledAtMaxFeeIncreases => warn!("Bundle failed to mine after {} fee increases", self.settings.max_fee_increases), - SendBundleResult::Error(error) => { - BuilderMetrics::increment_bundle_txns_failed(self.builder_index, self.entry_point.address()); - error!("Failed to send bundle. Will retry next block: {error:#?}"); - } - } - - if let Some(t) = send_bundle_response.take() { - if t.send(result).is_err() { - error!("Failed to send bundle result to manual caller"); - } + if let Err(e) = self.step_state(&mut state).await { + error!("Error in bundle sender loop: {e:#?}"); + self.metrics.increment_state_machine_errors(); + state.reset(); } - - timer.reset(); } } } @@ -267,266 +171,359 @@ where ) -> Self { Self { builder_index, - bundle_action_receiver, + bundle_action_receiver: Some(bundle_action_receiver), chain_spec, beneficiary, proposer, - entry_point, - transaction_tracker, + transaction_tracker: Some(transaction_tracker), pool, settings, event_sender, + metrics: BuilderMetrics { + builder_index, + entry_point: entry_point.address(), + }, + entry_point, _uo_type: PhantomData, } } - async fn check_for_and_log_transaction_update(&self) { - let update = self.transaction_tracker.check_for_update_now().await; - let update = match update { - Ok(update) => update, - Err(error) => { - error!("Failed to check for transaction updates: {error:#?}"); - return; - } - }; - let Some(update) = update else { - return; - }; - match update { - TrackerUpdate::Mined { - tx_hash, - block_number, - attempt_number, - gas_limit, - gas_used, - .. 
- } => { - BuilderMetrics::increment_bundle_txns_success( - self.builder_index, - self.entry_point.address(), - ); - BuilderMetrics::set_bundle_gas_stats( - gas_limit, - gas_used, - self.builder_index, - self.entry_point.address(), - ); - if attempt_number == 0 { - info!("Bundle with hash {tx_hash:?} landed in block {block_number}"); - } else { - info!("Bundle with hash {tx_hash:?} landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); - } + async fn step_state( + &mut self, + state: &mut SenderMachineState, + ) -> anyhow::Result<()> { + let tracker_update = state.wait_for_trigger().await?; + + match state.inner { + InnerState::Building(building_state) => { + self.handle_building_state(state, building_state).await?; } - TrackerUpdate::StillPendingAfterWait => (), - TrackerUpdate::LatestTxDropped { nonce } => { - self.emit(BuilderEvent::latest_transaction_dropped( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_dropped( - self.builder_index, - self.entry_point.address(), - ); - info!("Previous transaction dropped by sender"); + InnerState::Pending(pending_state) => { + self.handle_pending_state(state, pending_state, tracker_update) + .await?; } - TrackerUpdate::NonceUsedForOtherTx { nonce } => { - self.emit(BuilderEvent::nonce_used_for_other_transaction( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_nonce_used( - self.builder_index, - self.entry_point.address(), - ); - info!("Nonce used by external transaction") + InnerState::Cancelling(cancelling_state) => { + self.handle_cancelling_state(state, cancelling_state) + .await?; } - TrackerUpdate::ReplacementUnderpriced => { - BuilderMetrics::increment_bundle_txn_replacement_underpriced( - self.builder_index, - self.entry_point.address(), - ); - info!("Replacement transaction underpriced") + InnerState::CancelPending(cancel_pending_state) => { + self.handle_cancel_pending_state(state, cancel_pending_state, tracker_update) + .await?; } - }; - } - - /// Constructs a bundle and sends it to the entry point as a transaction. If - /// the bundle fails to be mined after - /// `settings.max_blocks_to_wait_for_mine` blocks, increases the gas fees by - /// enough to send a replacement transaction, then constructs a new bundle - /// using the new, higher gas requirements. Continues to retry with higher - /// gas costs until one of the following happens: - /// - /// 1. A transaction succeeds (not necessarily the most recent one) - /// 2. The gas fees are high enough that the bundle is empty because there - /// are no ops that meet the fee requirements. - /// 3. The transaction has not succeeded after `settings.max_fee_increases` - /// replacements. - async fn send_bundle_with_increasing_gas_fees(&self) -> SendBundleResult { - let result = self.send_bundle_with_increasing_gas_fees_inner().await; - match result { - Ok(result) => result, - Err(error) => SendBundleResult::Error(error), } + + Ok(()) } - /// Helper function returning `Result` to be able to use `?`. 
- async fn send_bundle_with_increasing_gas_fees_inner(&self) -> anyhow::Result { - let (nonce, mut required_fees) = self.transaction_tracker.get_nonce_and_required_fees()?; - let mut initial_op_count: Option = None; - let mut is_replacement = false; + async fn handle_building_state( + &mut self, + state: &mut SenderMachineState, + inner: BuildingState, + ) -> anyhow::Result<()> { + // send bundle + let block_number = state.block_number(); + debug!("Building bundle on block {}", block_number); + let result = self.send_bundle(state, inner.fee_increase_count).await; - for fee_increase_count in 0..=self.settings.max_fee_increases { - let Some(bundle_tx) = self - .get_bundle_tx(nonce, required_fees, is_replacement) - .await? - else { - self.emit(BuilderEvent::formed_bundle( - self.builder_index, - None, - nonce.low_u64(), - fee_increase_count, - required_fees, - )); - return Ok(match initial_op_count { - Some(initial_op_count) => { - BuilderMetrics::increment_bundle_txns_abandoned( - self.builder_index, - self.entry_point.address(), - ); - SendBundleResult::NoOperationsAfterFeeIncreases { - initial_op_count, - attempt_number: fee_increase_count, - } - } - None => SendBundleResult::NoOperationsInitially, - }); - }; - let BundleTx { - tx, - expected_storage, - op_hashes, - } = bundle_tx; - if initial_op_count.is_none() { - initial_op_count = Some(op_hashes.len()); + // handle result + match result { + Ok(SendBundleAttemptResult::Success) => { + // sent the bundle + info!("Bundle sent successfully"); + state.update(InnerState::Pending(inner.to_pending( + block_number + self.settings.max_blocks_to_wait_for_mine, + ))); } - let current_fees = GasFees::from(&tx); - - BuilderMetrics::increment_bundle_txns_sent( - self.builder_index, - self.entry_point.address(), - ); + Ok(SendBundleAttemptResult::NoOperations) => { + debug!("No operations to bundle"); + if inner.fee_increase_count > 0 { + warn!( + "Abandoning bundle after fee increases {}, no operations available", + inner.fee_increase_count + ); + self.metrics.increment_bundle_txns_abandoned(); - let send_result = self - .transaction_tracker - .send_transaction(tx.clone(), &expected_storage) - .await?; - let update = match send_result { - SendResult::TrackerUpdate(update) => update, - SendResult::TxHash(tx_hash) => { - self.emit(BuilderEvent::formed_bundle( - self.builder_index, - Some(BundleTxDetails { - tx_hash, - tx, - op_hashes: Arc::new(op_hashes), - }), - nonce.low_u64(), - fee_increase_count, - required_fees, - )); - self.transaction_tracker.wait_for_update().await? + // abandon the bundle by starting a new bundle process + // If the node we are using still has the transaction in the mempool, its + // possible we will get a `ReplacementUnderpriced` on the next iteration + // and will start a cancellation. 
+ state.reset(); + } else { + debug!("No operations available, waiting for next trigger"); + state.complete(Some(SendBundleResult::NoOperationsInitially)); } - }; + } + Ok(SendBundleAttemptResult::NonceTooLow) => { + // reset the transaction tracker and try again + info!("Nonce too low, starting new bundle attempt"); + state.reset(); + } + Ok(SendBundleAttemptResult::ReplacementUnderpriced) => { + info!("Replacement transaction underpriced, entering cancellation loop"); + state.update(InnerState::Cancelling(inner.to_cancelling())); + } + Ok(SendBundleAttemptResult::ConditionNotMet) => { + info!("Condition not met, notifying proposer and starting new bundle attempt"); + self.proposer.notify_condition_not_met(); + state.reset(); + } + Err(error) => { + error!("Bundle send error {error:?}"); + self.metrics.increment_bundle_txns_failed(); + let send_bundle_result = Some(SendBundleResult::Error(error)); + state.complete(send_bundle_result); + } + } + + Ok(()) + } + + async fn handle_pending_state( + &mut self, + state: &mut SenderMachineState, + inner: PendingState, + tracker_update: Option, + ) -> anyhow::Result<()> { + if let Some(update) = tracker_update { match update { TrackerUpdate::Mined { - tx_hash, - nonce, block_number, attempt_number, gas_limit, gas_used, + tx_hash, + nonce, + .. } => { + info!("Bundle transaction mined"); + self.metrics.process_bundle_txn_success(gas_limit, gas_used); self.emit(BuilderEvent::transaction_mined( self.builder_index, tx_hash, nonce.low_u64(), block_number, )); - BuilderMetrics::increment_bundle_txns_success( - self.builder_index, - self.entry_point.address(), - ); - BuilderMetrics::set_bundle_gas_stats( - gas_limit, - gas_used, - self.builder_index, - self.entry_point.address(), - ); - return Ok(SendBundleResult::Success { + let send_bundle_result = Some(SendBundleResult::Success { block_number, attempt_number, tx_hash, }); - } - TrackerUpdate::StillPendingAfterWait => { - info!("Transaction not mined for several blocks") + state.complete(send_bundle_result); } TrackerUpdate::LatestTxDropped { nonce } => { + // try again, don't wait for trigger, re-estimate fees + info!("Latest transaction dropped, starting new bundle attempt"); self.emit(BuilderEvent::latest_transaction_dropped( self.builder_index, nonce.low_u64(), )); - BuilderMetrics::increment_bundle_txns_dropped( - self.builder_index, - self.entry_point.address(), - ); - info!("Previous transaction dropped by sender"); + self.metrics.increment_bundle_txns_dropped(); + state.reset(); } TrackerUpdate::NonceUsedForOtherTx { nonce } => { + // try again, don't wait for trigger, re-estimate fees + info!("Nonce used externally, starting new bundle attempt"); self.emit(BuilderEvent::nonce_used_for_other_transaction( self.builder_index, nonce.low_u64(), )); - BuilderMetrics::increment_bundle_txns_nonce_used( - self.builder_index, - self.entry_point.address(), - ); - bail!("nonce used by external transaction") - } - TrackerUpdate::ReplacementUnderpriced => { - BuilderMetrics::increment_bundle_txn_replacement_underpriced( - self.builder_index, - self.entry_point.address(), - ); - info!("Replacement transaction underpriced, increasing fees") + self.metrics.increment_bundle_txns_nonce_used(); + state.reset(); } - }; + } + } else if state.block_number() >= inner.until { + // start replacement, don't wait for trigger. Continue + // to attempt until there are no longer any UOs priced high enough + // to bundle. 
info!( - "Bundle transaction failed to mine after {fee_increase_count} fee increases (maxFeePerGas: {}, maxPriorityFeePerGas: {}).", - current_fees.max_fee_per_gas, - current_fees.max_priority_fee_per_gas, + "Not mined after {} blocks, increasing fees, attempt: {}", + self.settings.max_blocks_to_wait_for_mine, + inner.fee_increase_count + 1 ); - BuilderMetrics::increment_bundle_txn_fee_increases( + self.metrics.increment_bundle_txn_fee_increases(); + state.update(InnerState::Building(inner.to_building())) + } + + Ok(()) + } + + async fn handle_cancelling_state( + &mut self, + state: &mut SenderMachineState, + inner: CancellingState, + ) -> anyhow::Result<()> { + info!("Cancelling last transaction"); + + let (estimated_fees, _) = self + .proposer + .estimate_gas_fees(None) + .await + .unwrap_or_default(); + + let cancel_res = state + .transaction_tracker + .cancel_transaction(self.entry_point.address(), estimated_fees) + .await; + + match cancel_res { + Ok(Some(_)) => { + info!("Cancellation transaction sent, waiting for confirmation"); + self.metrics.increment_cancellation_txns_sent(); + + state.update(InnerState::CancelPending(inner.to_cancel_pending( + state.block_number() + self.settings.max_blocks_to_wait_for_mine, + ))); + } + Ok(None) => { + info!("Soft cancellation or no transaction to cancel, starting new bundle attempt"); + self.metrics.increment_soft_cancellations(); + state.reset(); + } + Err(TransactionTrackerError::ReplacementUnderpriced) => { + info!("Replacement transaction underpriced during cancellation, trying again"); + state.update(InnerState::Cancelling(inner.to_self())); + } + Err(TransactionTrackerError::NonceTooLow) => { + // reset the transaction tracker and try again + info!("Nonce too low during cancellation, starting new bundle attempt"); + state.reset(); + } + Err(e) => { + error!("Failed to cancel transaction, moving back to building state: {e:#?}"); + self.metrics.increment_cancellation_txns_failed(); + state.reset(); + } + } + + Ok(()) + } + + async fn handle_cancel_pending_state( + &mut self, + state: &mut SenderMachineState, + inner: CancelPendingState, + tracker_update: Option, + ) -> anyhow::Result<()> { + // check for transaction update + if let Some(update) = tracker_update { + match update { + TrackerUpdate::Mined { .. } => { + // mined + info!("Cancellation transaction mined"); + self.metrics.increment_cancellation_txns_mined(); + } + TrackerUpdate::LatestTxDropped { .. } => { + // If a cancellation gets dropped, move to bundling state as there is no + // longer a pending transaction + info!("Cancellation transaction dropped, starting new bundle attempt"); + } + TrackerUpdate::NonceUsedForOtherTx { .. 
} => { + // If a nonce is used externally, move to bundling state as there is no longer + // a pending transaction + info!("Nonce used externally while cancelling, starting new bundle attempt"); + } + } + state.reset(); + } else if state.block_number() >= inner.until { + if inner.fee_increase_count >= self.settings.max_fee_increases { + // abandon the cancellation + warn!("Abandoning cancellation after max fee increases {}, starting new bundle attempt", inner.fee_increase_count); + state.reset(); + } else { + // start replacement, don't wait for trigger + info!( + "Cancellation not mined after {} blocks, increasing fees, attempt: {}", + self.settings.max_blocks_to_wait_for_mine, + inner.fee_increase_count + 1 + ); + state.update(InnerState::Cancelling(inner.to_cancelling())); + } + } + + Ok(()) + } + + /// Constructs a bundle and sends it to the entry point as a transaction. + /// + /// Returns empty if: + /// - There are no ops available to bundle initially. + /// - The gas fees are high enough that the bundle is empty because there + /// are no ops that meet the fee requirements. + async fn send_bundle( + &mut self, + state: &mut SenderMachineState, + fee_increase_count: u64, + ) -> anyhow::Result { + let (nonce, required_fees) = state.transaction_tracker.get_nonce_and_required_fees()?; + + let Some(bundle_tx) = self + .get_bundle_tx(nonce, required_fees, fee_increase_count > 0) + .await? + else { + self.emit(BuilderEvent::formed_bundle( self.builder_index, - self.entry_point.address(), - ); - required_fees = Some( - current_fees.increase_by_percent(self.settings.replacement_fee_percent_increase), - ); - is_replacement = true; + None, + nonce.low_u64(), + fee_increase_count, + required_fees, + )); + return Ok(SendBundleAttemptResult::NoOperations); + }; + let BundleTx { + tx, + expected_storage, + op_hashes, + } = bundle_tx; + + self.metrics.increment_bundle_txns_sent(); + + let send_result = state + .transaction_tracker + .send_transaction(tx.clone(), &expected_storage) + .await; + + match send_result { + Ok(tx_hash) => { + self.emit(BuilderEvent::formed_bundle( + self.builder_index, + Some(BundleTxDetails { + tx_hash, + tx, + op_hashes: Arc::new(op_hashes), + }), + nonce.low_u64(), + fee_increase_count, + required_fees, + )); + + Ok(SendBundleAttemptResult::Success) + } + Err(TransactionTrackerError::NonceTooLow) => { + self.metrics.increment_bundle_txn_nonce_too_low(); + warn!("Bundle attempt nonce too low"); + Ok(SendBundleAttemptResult::NonceTooLow) + } + Err(TransactionTrackerError::ReplacementUnderpriced) => { + self.metrics.increment_bundle_txn_replacement_underpriced(); + warn!("Bundle attempt replacement transaction underpriced"); + Ok(SendBundleAttemptResult::ReplacementUnderpriced) + } + Err(TransactionTrackerError::ConditionNotMet) => { + self.metrics.increment_bundle_txn_condition_not_met(); + warn!("Bundle attempt condition not met"); + Ok(SendBundleAttemptResult::ConditionNotMet) + } + Err(e) => { + error!("Failed to send bundle with unexpected error: {e:?}"); + Err(e.into()) + } } - BuilderMetrics::increment_bundle_txns_abandoned( - self.builder_index, - self.entry_point.address(), - ); - Ok(SendBundleResult::StalledAtMaxFeeIncreases) } /// Builds a bundle and returns some metadata and the transaction to send /// it, or `None` if there are no valid operations available. 
async fn get_bundle_tx( - &self, + &mut self, nonce: U256, required_fees: Option, is_replacement: bool, @@ -536,19 +533,31 @@ where .make_bundle(required_fees, is_replacement) .await .context("proposer should create bundle for builder")?; + let remove_ops_future = async { + if bundle.rejected_ops.is_empty() { + return; + } + let result = self.remove_ops_from_pool(&bundle.rejected_ops).await; if let Err(error) = result { error!("Failed to remove rejected ops from pool: {error}"); } }; + let update_entities_future = async { + if bundle.entity_updates.is_empty() { + return; + } + let result = self.update_entities_in_pool(&bundle.entity_updates).await; if let Err(error) = result { error!("Failed to update entities in pool: {error}"); } }; + join!(remove_ops_future, update_entities_future); + if bundle.is_empty() { if !bundle.rejected_ops.is_empty() || !bundle.entity_updates.is_empty() { info!( @@ -611,55 +620,947 @@ where } } -struct BuilderMetrics {} +struct SenderMachineState { + trigger: TRIG, + transaction_tracker: T, + send_bundle_response: Option>, + inner: InnerState, + requires_reset: bool, +} -impl BuilderMetrics { - fn increment_bundle_txns_sent(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_sent", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()) - .increment(1); +impl SenderMachineState { + fn new(trigger: TRIG, transaction_tracker: T) -> Self { + Self { + trigger, + transaction_tracker, + send_bundle_response: None, + inner: InnerState::new(), + requires_reset: false, + } } - fn increment_bundle_txns_success(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_success", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn update(&mut self, inner: InnerState) { + self.inner = inner; } - fn increment_bundle_txns_dropped(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_dropped", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + // resets the state machine to the initial state, doesn't wait for next trigger + fn reset(&mut self) { + self.requires_reset = true; + let building_state = BuildingState { + wait_for_trigger: false, + fee_increase_count: 0, + }; + self.inner = InnerState::Building(building_state); } - // used when we decide to stop trying a transaction - fn increment_bundle_txns_abandoned(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_abandoned", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn complete(&mut self, result: Option) { + if let Some(result) = result { + if let Some(r) = self.send_bundle_response.take() { + if r.send(result).is_err() { + error!("Failed to send bundle result to manual caller"); + } + } + } + self.inner = InnerState::new(); } - // used when sending a transaction fails - fn increment_bundle_txns_failed(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_failed", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); - } + async fn wait_for_trigger(&mut self) -> anyhow::Result> { + if self.requires_reset { + self.transaction_tracker.reset().await; + self.requires_reset = false; + } - fn increment_bundle_txns_nonce_used(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_nonce_used", "entry_point" => 
entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); - } + match &self.inner { + InnerState::Building(s) => { + if !s.wait_for_trigger { + return Ok(None); + } - fn increment_bundle_txn_fee_increases(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_fee_increases", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + self.send_bundle_response = self.trigger.wait_for_trigger().await?; + self.transaction_tracker + .check_for_update() + .await + .map_err(|e| anyhow::anyhow!("transaction tracker update error {e:?}")) + } + InnerState::Pending(..) | InnerState::CancelPending(..) => { + self.trigger.wait_for_block().await?; + self.transaction_tracker + .check_for_update() + .await + .map_err(|e| anyhow::anyhow!("transaction tracker update error {e:?}")) + } + InnerState::Cancelling(..) => Ok(None), + } } - fn increment_bundle_txn_replacement_underpriced(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_replacement_underpriced", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn block_number(&self) -> u64 { + self.trigger.last_block().block_number } +} + +// State of the sender loop +enum InnerState { + // Building a bundle, optionally waiting for a trigger to send it + Building(BuildingState), + // Waiting for a bundle to be mined + Pending(PendingState), + // Cancelling the last transaction + Cancelling(CancellingState), + // Waiting for a cancellation transaction to be mined + CancelPending(CancelPendingState), +} + +impl InnerState { + fn new() -> Self { + InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + }) + } +} + +#[derive(Debug, Clone, Copy)] +struct BuildingState { + wait_for_trigger: bool, + fee_increase_count: u64, +} + +impl BuildingState { + fn to_pending(self, until: u64) -> PendingState { + PendingState { + until, + fee_increase_count: self.fee_increase_count, + } + } + + fn to_cancelling(self) -> CancellingState { + CancellingState { + fee_increase_count: 0, + } + } +} + +#[derive(Debug, Clone, Copy)] +struct PendingState { + until: u64, + fee_increase_count: u64, +} + +impl PendingState { + fn to_building(self) -> BuildingState { + BuildingState { + wait_for_trigger: false, + fee_increase_count: self.fee_increase_count + 1, + } + } +} + +#[derive(Debug, Clone, Copy)] +struct CancellingState { + fee_increase_count: u64, +} + +impl CancellingState { + fn to_self(mut self) -> Self { + self.fee_increase_count += 1; + self + } + + fn to_cancel_pending(self, until: u64) -> CancelPendingState { + CancelPendingState { + until, + fee_increase_count: self.fee_increase_count, + } + } +} + +#[derive(Debug, Clone, Copy)] +struct CancelPendingState { + until: u64, + fee_increase_count: u64, +} + +impl CancelPendingState { + fn to_cancelling(self) -> CancellingState { + CancellingState { + fee_increase_count: self.fee_increase_count + 1, + } + } +} + +#[async_trait] +#[cfg_attr(test, automock)] +trait Trigger { + async fn wait_for_trigger( + &mut self, + ) -> anyhow::Result>>; + + async fn wait_for_block(&mut self) -> anyhow::Result; + + fn last_block(&self) -> &NewHead; +} + +struct BundleSenderTrigger { + bundling_mode: BundlingMode, + block_rx: UnboundedReceiver, + bundle_action_receiver: mpsc::Receiver, + timer: tokio::time::Interval, + last_block: NewHead, +} + +#[async_trait] +impl Trigger for BundleSenderTrigger { + async fn wait_for_trigger( + &mut 
self, + ) -> anyhow::Result>> { + let mut send_bundle_response: Option> = None; + + loop { + // 3 triggers for loop logic: + // 1 - new block + // - If auto mode, send next bundle + // 2 - timer tick + // - If auto mode, send next bundle + // 3 - action recv + // - If change mode, change and restart loop + // - If send bundle and manual mode, send next bundle + tokio::select! { + b = self.block_rx.recv() => { + let Some(b) = b else { + error!("Block stream closed"); + bail!("Block stream closed"); + }; + + self.last_block = b; + + match self.bundling_mode { + BundlingMode::Manual => continue, + BundlingMode::Auto => break, + } + }, + _ = self.timer.tick() => { + match self.bundling_mode { + BundlingMode::Manual => continue, + BundlingMode::Auto => break, + } + }, + a = self.bundle_action_receiver.recv() => { + match a { + Some(BundleSenderAction::ChangeMode(mode)) => { + debug!("changing bundling mode to {mode:?}"); + self.bundling_mode = mode; + continue; + }, + Some(BundleSenderAction::SendBundle(r)) => { + match self.bundling_mode { + BundlingMode::Manual => { + send_bundle_response = Some(r.responder); + break; + }, + BundlingMode::Auto => { + error!("Received bundle send action while in auto mode, ignoring"); + continue; + } + } + }, + None => { + error!("Bundle action recv closed"); + bail!("Bundle action recv closed"); + } + } + } + }; + } + + self.consume_blocks()?; + + Ok(send_bundle_response) + } + + async fn wait_for_block(&mut self) -> anyhow::Result { + self.last_block = self + .block_rx + .recv() + .await + .ok_or_else(|| anyhow::anyhow!("Block stream closed"))?; + self.consume_blocks()?; + Ok(self.last_block.clone()) + } + + fn last_block(&self) -> &NewHead { + &self.last_block + } +} + +impl BundleSenderTrigger { + async fn new( + pool_client: &P, + bundle_action_receiver: mpsc::Receiver, + timer_interval: Duration, + ) -> anyhow::Result { + let block_rx = Self::start_block_stream(pool_client).await?; + + Ok(Self { + bundling_mode: BundlingMode::Auto, + block_rx, + bundle_action_receiver, + timer: tokio::time::interval(timer_interval), + last_block: NewHead { + block_hash: H256::zero(), + block_number: 0, + }, + }) + } + + async fn start_block_stream( + pool_client: &P, + ) -> anyhow::Result> { + let Ok(mut new_heads) = pool_client.subscribe_new_heads().await else { + error!("Failed to subscribe to new blocks"); + bail!("failed to subscribe to new blocks"); + }; + + let (tx, rx) = mpsc::unbounded_channel(); + tokio::spawn(async move { + loop { + match new_heads.next().await { + Some(b) => { + if tx.send(b).is_err() { + error!("Failed to buffer new block for bundle sender"); + return; + } + } + None => { + error!("Block stream ended"); + return; + } + } + } + }); + + Ok(rx) + } + + fn consume_blocks(&mut self) -> anyhow::Result<()> { + // Consume any other blocks that may have been buffered up + loop { + match self.block_rx.try_recv() { + Ok(b) => { + self.last_block = b; + } + Err(mpsc::error::TryRecvError::Empty) => { + return Ok(()); + } + Err(mpsc::error::TryRecvError::Disconnected) => { + error!("Block stream closed"); + bail!("Block stream closed"); + } + } + } + } +} + +#[derive(Debug, Clone)] +struct BuilderMetrics { + builder_index: u64, + entry_point: Address, +} + +impl BuilderMetrics { + fn increment_bundle_txns_sent(&self) { + metrics::counter!("builder_bundle_txns_sent", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()) + .increment(1); + } + + fn process_bundle_txn_success(&self, gas_limit: Option, gas_used: 
Option) { + metrics::counter!("builder_bundle_txns_success", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); - fn set_bundle_gas_stats( - gas_limit: Option, - gas_used: Option, - builder_index: u64, - entry_point: Address, - ) { if let Some(limit) = gas_limit { - metrics::counter!("builder_bundle_gas_limit", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(limit.as_u64()); + metrics::counter!("builder_bundle_gas_limit", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(limit.as_u64()); } if let Some(used) = gas_used { - metrics::counter!("builder_bundle_gas_used", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(used.as_u64()); + metrics::counter!("builder_bundle_gas_used", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(used.as_u64()); + } + } + + fn increment_bundle_txns_dropped(&self) { + metrics::counter!("builder_bundle_txns_dropped", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + // used when we decide to stop trying a transaction + fn increment_bundle_txns_abandoned(&self) { + metrics::counter!("builder_bundle_txns_abandoned", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + // used when sending a transaction fails + fn increment_bundle_txns_failed(&self) { + metrics::counter!("builder_bundle_txns_failed", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txns_nonce_used(&self) { + metrics::counter!("builder_bundle_txns_nonce_used", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_fee_increases(&self) { + metrics::counter!("builder_bundle_fee_increases", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_replacement_underpriced(&self) { + metrics::counter!("builder_bundle_replacement_underpriced", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_nonce_too_low(&self) { + metrics::counter!("builder_bundle_nonce_too_low", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_condition_not_met(&self) { + metrics::counter!("builder_bundle_condition_not_met", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_cancellation_txns_sent(&self) { + metrics::counter!("builder_cancellation_txns_sent", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_cancellation_txns_mined(&self) { + metrics::counter!("builder_cancellation_txns_mined", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_soft_cancellations(&self) { + metrics::counter!("builder_soft_cancellations", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn 
increment_cancellation_txns_failed(&self) { + metrics::counter!("builder_cancellation_txns_failed", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_state_machine_errors(&self) { + metrics::counter!("builder_state_machine_errors", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } +} + +#[cfg(test)] +mod tests { + use ethers::types::Bytes; + use mockall::Sequence; + use rundler_provider::MockEntryPointV0_6; + use rundler_types::{ + chain::ChainSpec, pool::MockPool, v0_6::UserOperation, UserOpsPerAggregator, + }; + use tokio::sync::{broadcast, mpsc}; + + use super::*; + use crate::{ + bundle_proposer::{Bundle, MockBundleProposer}, + bundle_sender::{BundleSenderImpl, MockTrigger}, + transaction_tracker::MockTransactionTracker, + }; + + #[tokio::test] + async fn test_empty_send() { + let Mocks { + mut mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + // block 0 + add_trigger_no_update_last_block( + &mut mock_trigger, + &mut mock_tracker, + &mut Sequence::new(), + 0, + ); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // empty bundle + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(Bundle::::default()) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in building state + let mut state = SenderMachineState::new(mock_trigger, mock_tracker); + + sender.step_state(&mut state).await.unwrap(); + + // empty bundle shouldn't move out of building state + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: true, + .. + }) + )); + } + + #[tokio::test] + async fn test_send() { + let Mocks { + mut mock_proposer, + mut mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + // block 0 + add_trigger_no_update_last_block( + &mut mock_trigger, + &mut mock_tracker, + &mut Sequence::new(), + 0, + ); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // bundle with one op + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(bundle()) })); + + // should create the bundle txn + mock_entry_point + .expect_get_send_bundle_transaction() + .returning(|_, _, _, _| TypedTransaction::default()); + + // should send the bundle txn + mock_tracker + .expect_send_transaction() + .returning(|_, _| Box::pin(async { Ok(H256::zero()) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in building state + let mut state = SenderMachineState::new(mock_trigger, mock_tracker); + + sender.step_state(&mut state).await.unwrap(); + + // end in the pending state + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { + until: 3, // block 0 + wait 3 blocks + .. 
+ }) + )); + } + + #[tokio::test] + async fn test_wait_for_mine_success() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, 1); + mock_trigger + .expect_wait_for_block() + .once() + .in_sequence(&mut seq) + .returning(|| { + Box::pin(async { + Ok(NewHead { + block_number: 2, + block_hash: H256::zero(), + }) + }) + }); + // no call to last_block after mine + + let mut seq = Sequence::new(); + mock_tracker + .expect_check_for_update() + .once() + .in_sequence(&mut seq) + .returning(|| Box::pin(async { Ok(None) })); + mock_tracker + .expect_check_for_update() + .once() + .in_sequence(&mut seq) + .returning(|| { + Box::pin(async { + Ok(Some(TrackerUpdate::Mined { + block_number: 2, + nonce: U256::zero(), + gas_limit: None, + gas_used: None, + tx_hash: H256::zero(), + attempt_number: 0, + })) + }) + }); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in pending state + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Pending(PendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + // first step has no update + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { until: 3, .. }) + )); + + // second step is mined and moves back to building + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + }) + )); + } + + #[tokio::test] + async fn test_wait_for_mine_timed_out() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + for i in 1..=3 { + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, i); + } + + mock_tracker + .expect_check_for_update() + .times(3) + .returning(|| Box::pin(async { Ok(None) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in pending state + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Pending(PendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + // first and second step has no update + for _ in 0..2 { + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { until: 3, .. 
}) + )); + } + + // third step times out and moves back to building with a fee increase + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: false, + fee_increase_count: 1, + }) + )); + } + + #[tokio::test] + async fn test_send_cancel() { + let Mocks { + mut mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + mock_proposer + .expect_estimate_gas_fees() + .once() + .returning(|_| Box::pin(async { Ok((GasFees::default(), U256::zero())) })); + + mock_tracker + .expect_cancel_transaction() + .once() + .returning(|_, _| Box::pin(async { Ok(Some(H256::zero())) })); + + mock_trigger.expect_last_block().return_const(NewHead { + block_number: 0, + block_hash: H256::zero(), + }); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Cancelling(CancellingState { + fee_increase_count: 0, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }) + )); + } + + #[tokio::test] + async fn test_resubmit_cancel() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + for i in 1..=3 { + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, i); + } + + mock_tracker + .expect_check_for_update() + .times(3) + .returning(|| Box::pin(async { Ok(None) })); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + for _ in 0..2 { + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }) + )); + } + + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Cancelling(CancellingState { + fee_increase_count: 1, + }) + )); + } + + #[tokio::test] + async fn test_condition_not_met() { + let Mocks { + mut mock_proposer, + mut mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + add_trigger_no_update_last_block(&mut mock_trigger, &mut mock_tracker, &mut seq, 1); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // bundle with one op + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(bundle()) })); + + // should create the bundle txn + mock_entry_point + .expect_get_send_bundle_transaction() + .returning(|_, _, _, _| TypedTransaction::default()); + + // should send the bundle txn, returns condition not met + mock_tracker + .expect_send_transaction() + .returning(|_, _| Box::pin(async { Err(TransactionTrackerError::ConditionNotMet) })); + + // should notify proposer that condition was not met + mock_proposer + .expect_notify_condition_not_met() + .times(1) + .return_const(()); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + 
send_bundle_response: None, + inner: InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + sender.step_state(&mut state).await.unwrap(); + + // end back in the building state without waiting for trigger + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: false, + fee_increase_count: 0, + }) + )); + } + + struct Mocks { + mock_proposer: MockBundleProposer, + mock_entry_point: MockEntryPointV0_6, + mock_tracker: MockTransactionTracker, + mock_trigger: MockTrigger, + } + + fn new_mocks() -> Mocks { + let mut mock_entry_point = MockEntryPointV0_6::new(); + mock_entry_point + .expect_address() + .return_const(Address::default()); + + Mocks { + mock_proposer: MockBundleProposer::new(), + mock_entry_point, + mock_tracker: MockTransactionTracker::new(), + mock_trigger: MockTrigger::new(), + } + } + + fn new_sender( + mock_proposer: MockBundleProposer, + mock_entry_point: MockEntryPointV0_6, + ) -> BundleSenderImpl< + UserOperation, + MockBundleProposer, + MockEntryPointV0_6, + MockTransactionTracker, + MockPool, + > { + BundleSenderImpl::new( + 0, + mpsc::channel(1000).1, + ChainSpec::default(), + Address::default(), + mock_proposer, + mock_entry_point, + MockTransactionTracker::new(), + MockPool::new(), + Settings { + max_fee_increases: 3, + max_blocks_to_wait_for_mine: 3, + }, + broadcast::channel(1000).0, + ) + } + + fn add_trigger_no_update_last_block( + mock_trigger: &mut MockTrigger, + mock_tracker: &mut MockTransactionTracker, + seq: &mut Sequence, + block_number: u64, + ) { + mock_trigger + .expect_wait_for_trigger() + .once() + .in_sequence(seq) + .returning(move || Box::pin(async move { Ok(None) })); + mock_tracker + .expect_check_for_update() + .returning(|| Box::pin(async { Ok(None) })); + mock_trigger + .expect_last_block() + .once() + .in_sequence(seq) + .return_const(NewHead { + block_number, + block_hash: H256::zero(), + }); + } + + fn add_trigger_wait_for_block_last_block( + mock_trigger: &mut MockTrigger, + seq: &mut Sequence, + block_number: u64, + ) { + mock_trigger + .expect_wait_for_block() + .once() + .in_sequence(seq) + .returning(move || { + Box::pin(async move { + Ok(NewHead { + block_number, + block_hash: H256::zero(), + }) + }) + }); + mock_trigger + .expect_last_block() + .once() + .in_sequence(seq) + .return_const(NewHead { + block_number, + block_hash: H256::zero(), + }); + } + + fn bundle() -> Bundle { + Bundle { + gas_estimate: U256::from(100_000), + gas_fees: GasFees::default(), + expected_storage: Default::default(), + rejected_ops: vec![], + entity_updates: vec![], + ops_per_aggregator: vec![UserOpsPerAggregator { + aggregator: Address::zero(), + signature: Bytes::new(), + user_ops: vec![UserOperation::default()], + }], } } } diff --git a/crates/builder/src/emit.rs b/crates/builder/src/emit.rs index e5c70cd36..66de76f01 100644 --- a/crates/builder/src/emit.rs +++ b/crates/builder/src/emit.rs @@ -196,6 +196,17 @@ pub enum OpRejectionReason { FailedRevalidation { error: SimulationError }, /// Operation reverted during bundle formation simulation with message FailedInBundle { message: Arc }, + /// Operation's storage slot condition was not met + ConditionNotMet(ConditionNotMetReason), +} + +/// Reason for a condition not being met +#[derive(Clone, Debug)] +pub struct ConditionNotMetReason { + pub address: Address, + pub slot: H256, + pub expected: H256, + pub actual: H256, } impl 
Display for BuilderEvent { diff --git a/crates/builder/src/lib.rs b/crates/builder/src/lib.rs index 3363679ff..ceee246fc 100644 --- a/crates/builder/src/lib.rs +++ b/crates/builder/src/lib.rs @@ -27,7 +27,8 @@ pub use emit::{BuilderEvent, BuilderEventKind}; mod sender; pub use sender::{ - BloxrouteSenderArgs, FlashbotsSenderArgs, TransactionSenderArgs, TransactionSenderKind, + BloxrouteSenderArgs, FlashbotsSenderArgs, RawSenderArgs, TransactionSenderArgs, + TransactionSenderKind, }; mod server; diff --git a/crates/builder/src/sender/bloxroute.rs b/crates/builder/src/sender/bloxroute.rs index b23ea7a92..66a8807b3 100644 --- a/crates/builder/src/sender/bloxroute.rs +++ b/crates/builder/src/sender/bloxroute.rs @@ -19,6 +19,7 @@ use ethers::{ providers::{JsonRpcClient, Middleware, Provider}, types::{ transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, TxHash, H256, + U256, }, utils::hex, }; @@ -28,12 +29,16 @@ use jsonrpsee::{ http_client::{transport::HttpBackend, HeaderMap, HeaderValue, HttpClient, HttpClientBuilder}, }; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue; use tokio::time; use tonic::async_trait; -use super::{fill_and_sign, Result, SentTxInfo, TransactionSender, TxStatus}; +use super::{ + create_hard_cancel_tx, fill_and_sign, CancelTxInfo, Result, SentTxInfo, TransactionSender, + TxStatus, +}; pub(crate) struct PolygonBloxrouteTransactionSender where @@ -62,6 +67,29 @@ where Ok(SentTxInfo { nonce, tx_hash }) } + async fn cancel_transaction( + &self, + _tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result { + let tx = create_hard_cancel_tx(self.provider.address(), to, nonce, gas_fees); + + let (raw_tx, _) = fill_and_sign(&self.provider, tx).await?; + + let tx_hash = self + .provider + .provider() + .request("eth_sendRawTransaction", (raw_tx,)) + .await?; + + Ok(CancelTxInfo { + tx_hash, + soft_cancelled: false, + }) + } + async fn get_transaction_status(&self, tx_hash: H256) -> Result { let tx = self .provider diff --git a/crates/builder/src/sender/conditional.rs b/crates/builder/src/sender/conditional.rs deleted file mode 100644 index e02f78a68..000000000 --- a/crates/builder/src/sender/conditional.rs +++ /dev/null @@ -1,103 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. 
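The `ConditionalTransactionSender` removed here is superseded by the `use_conditional_rpc` flag on the raw sender. As a rough sketch (not a prescribed configuration) using the `RawSenderArgs` type this diff re-exports from `rundler_builder`, a deployment that previously selected the `conditional` sender kind would now configure the raw sender along these lines:

```rust
use rundler_builder::{RawSenderArgs, TransactionSenderArgs};

// Sketch: stand-in for the removed `TransactionSenderArgs::Conditional` variant.
// All field values below are illustrative, not recommendations.
fn conditional_style_raw_sender(submit_url: &str) -> TransactionSenderArgs {
    TransactionSenderArgs::Raw(RawSenderArgs {
        // URL bundle transactions are submitted to; the CLI layer falls back to
        // the node RPC URL when `--builder.submit_url` is unset.
        submit_url: submit_url.to_string(),
        // Keep using the node RPC for status lookups.
        use_submit_for_status: false,
        // Assume the status provider reports dropped transactions.
        dropped_status_supported: true,
        // Routes submission through `eth_sendRawTransactionConditional`.
        use_conditional_rpc: true,
    })
}
```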
- -use std::sync::Arc; - -use anyhow::Context; -use ethers::{ - middleware::SignerMiddleware, - providers::{JsonRpcClient, Middleware, PendingTransaction, Provider}, - types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256}, -}; -use ethers_signers::Signer; -use rundler_sim::ExpectedStorage; -use serde_json::json; -use tonic::async_trait; - -use super::{fill_and_sign, Result, SentTxInfo, TransactionSender, TxStatus}; - -pub(crate) struct ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - // The `SignerMiddleware` specifically needs to wrap a `Provider`, and not - // just any `Middleware`, because `.request()` is only on `Provider` and not - // on `Middleware`. - provider: SignerMiddleware>, S>, -} - -#[async_trait] -impl TransactionSender for ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - async fn send_transaction( - &self, - tx: TypedTransaction, - expected_storage: &ExpectedStorage, - ) -> Result { - let (raw_tx, nonce) = fill_and_sign(&self.provider, tx).await?; - - let tx_hash = self - .provider - .provider() - .request( - "eth_sendRawTransactionConditional", - (raw_tx, json!({ "knownAccounts": expected_storage })), - ) - .await?; - - Ok(SentTxInfo { nonce, tx_hash }) - } - - async fn get_transaction_status(&self, tx_hash: H256) -> Result { - let tx = self - .provider - .get_transaction(tx_hash) - .await - .context("provider should return transaction status")?; - Ok(match tx { - None => TxStatus::Dropped, - Some(tx) => match tx.block_number { - None => TxStatus::Pending, - Some(block_number) => TxStatus::Mined { - block_number: block_number.as_u64(), - }, - }, - }) - } - - async fn wait_until_mined(&self, tx_hash: H256) -> Result> { - Ok(PendingTransaction::new(tx_hash, self.provider.inner()) - .await - .context("should wait for transaction to be mined or dropped")?) 
- } - - fn address(&self) -> Address { - self.provider.address() - } -} - -impl ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - pub(crate) fn new(provider: Arc>, signer: S) -> Self { - Self { - provider: SignerMiddleware::new(provider, signer), - } - } -} diff --git a/crates/builder/src/sender/flashbots.rs b/crates/builder/src/sender/flashbots.rs index bad8c6bf3..eecaf1144 100644 --- a/crates/builder/src/sender/flashbots.rs +++ b/crates/builder/src/sender/flashbots.rs @@ -26,8 +26,7 @@ use ethers::{ middleware::SignerMiddleware, providers::{interval, JsonRpcClient, Middleware, Provider}, types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, TxHash, H256, - U256, U64, + transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, H256, U256, U64, }, utils, }; @@ -37,8 +36,9 @@ use futures_util::{Stream, StreamExt, TryFutureExt}; use pin_project::pin_project; use reqwest::{ header::{HeaderMap, HeaderValue, CONTENT_TYPE}, - Client, + Client, Response, }; +use rundler_types::GasFees; use serde::{de, Deserialize, Serialize}; use serde_json::{json, Value}; use tonic::async_trait; @@ -46,6 +46,7 @@ use tonic::async_trait; use super::{ fill_and_sign, ExpectedStorage, Result, SentTxInfo, TransactionSender, TxSenderError, TxStatus, }; +use crate::sender::CancelTxInfo; #[derive(Debug)] pub(crate) struct FlashbotsTransactionSender { @@ -75,6 +76,28 @@ where Ok(SentTxInfo { nonce, tx_hash }) } + async fn cancel_transaction( + &self, + tx_hash: H256, + _nonce: U256, + _to: Address, + _gas_fees: GasFees, + ) -> Result { + let success = self + .flashbots_client + .cancel_private_transaction(tx_hash) + .await?; + + if !success { + return Err(TxSenderError::SoftCancelFailed); + } + + Ok(CancelTxInfo { + tx_hash: H256::zero(), + soft_cancelled: true, + }) + } + async fn get_transaction_status(&self, tx_hash: H256) -> Result { let status = self.flashbots_client.status(tx_hash).await?; Ok(match status.status { @@ -181,13 +204,31 @@ struct Refund { #[derive(Serialize, Debug)] #[serde(rename_all = "camelCase")] -struct FlashbotsPrivateTransaction { +struct FlashbotsSendPrivateTransactionRequest { tx: Bytes, #[serde(skip_serializing_if = "Option::is_none")] max_block_number: Option, preferences: Preferences, } +#[derive(Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsSendPrivateTransactionResponse { + result: H256, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsCancelPrivateTransactionRequest { + tx_hash: H256, +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsCancelPrivateTransactionResponse { + result: bool, +} + #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] #[allow(dead_code)] @@ -277,7 +318,7 @@ where "jsonrpc": "2.0", "method": "eth_sendPrivateTransaction", "params": [ - FlashbotsPrivateTransaction { + FlashbotsSendPrivateTransactionRequest { tx: raw_tx, max_block_number: None, preferences, @@ -285,6 +326,37 @@ where "id": 1 }); + let response = self.sign_send_request(body).await?; + + let parsed_response = response + .json::() + .await + .map_err(|e| anyhow!("failed to deserialize Flashbots response: {:?}", e))?; + + Ok(parsed_response.result) + } + + async fn cancel_private_transaction(&self, tx_hash: H256) -> anyhow::Result { + let body = json!({ + "jsonrpc": "2.0", + "method": "eth_cancelPrivateTransaction", + "params": [ + FlashbotsCancelPrivateTransactionRequest { tx_hash } + ], + "id": 1 
+ }); + + let response = self.sign_send_request(body).await?; + + let parsed_response = response + .json::() + .await + .map_err(|e| anyhow!("failed to deserialize Flashbots response: {:?}", e))?; + + Ok(parsed_response.result) + } + + async fn sign_send_request(&self, body: Value) -> anyhow::Result { let signature = self .signer .sign_message(format!( @@ -302,29 +374,16 @@ where headers.insert("x-flashbots-signature", header_val); // Send the request - let response = self - .http_client + self.http_client .post(&self.relay_url) .headers(headers) .body(body.to_string()) .send() .await - .map_err(|e| anyhow!("failed to send transaction to Flashbots: {:?}", e))?; - - let parsed_response = response - .json::() - .await - .map_err(|e| anyhow!("failed to deserialize Flashbots response: {:?}", e))?; - - Ok(parsed_response.result) + .map_err(|e| anyhow!("failed to send request to Flashbots: {:?}", e)) } } -#[derive(Deserialize, Debug)] -struct FlashbotsResponse { - result: TxHash, -} - type PinBoxFut<'a, T> = Pin> + Send + 'a>>; enum PendingFlashbotsTxState<'a> { diff --git a/crates/builder/src/sender/mod.rs b/crates/builder/src/sender/mod.rs index e958e574b..fcc0ed70d 100644 --- a/crates/builder/src/sender/mod.rs +++ b/crates/builder/src/sender/mod.rs @@ -12,7 +12,6 @@ // If not, see https://www.gnu.org/licenses/. mod bloxroute; -mod conditional; mod flashbots; mod raw; use std::{sync::Arc, time::Duration}; @@ -20,13 +19,13 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context; use async_trait::async_trait; pub(crate) use bloxroute::PolygonBloxrouteTransactionSender; -pub(crate) use conditional::ConditionalTransactionSender; use enum_dispatch::enum_dispatch; use ethers::{ prelude::SignerMiddleware, providers::{JsonRpcClient, Middleware, Provider, ProviderError}, types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, H256, U256, + transaction::eip2718::TypedTransaction, Address, Bytes, Eip1559TransactionRequest, + TransactionReceipt, H256, U256, }, }; use ethers_signers::{LocalWallet, Signer}; @@ -35,12 +34,22 @@ pub(crate) use flashbots::FlashbotsTransactionSender; use mockall::automock; pub(crate) use raw::RawTransactionSender; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; + #[derive(Debug)] pub(crate) struct SentTxInfo { pub(crate) nonce: U256, pub(crate) tx_hash: H256, } +#[derive(Debug)] +pub(crate) struct CancelTxInfo { + pub(crate) tx_hash: H256, + // True if the transaction was soft-cancelled. Soft-cancellation is when the RPC endpoint + // accepts the cancel without an onchain transaction. 
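+ // Note: `TransactionTrackerImpl::cancel_transaction` (below) resets tracker state and
+ // returns no hash for a soft cancel, while a hard cancel's hash is tracked like any
+ // other pending transaction.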
+ pub(crate) soft_cancelled: bool, +} + #[derive(Debug)] pub(crate) enum TxStatus { Pending, @@ -54,6 +63,15 @@ pub(crate) enum TxSenderError { /// Replacement transaction was underpriced #[error("replacement transaction underpriced")] ReplacementUnderpriced, + /// Nonce too low + #[error("nonce too low")] + NonceTooLow, + /// Conditional value not met + #[error("storage slot value condition not met")] + ConditionNotMet, + /// Soft cancellation failed + #[error("soft cancel failed")] + SoftCancelFailed, /// All other errors #[error(transparent)] Other(#[from] anyhow::Error), @@ -71,6 +89,14 @@ pub(crate) trait TransactionSender: Send + Sync + 'static { expected_storage: &ExpectedStorage, ) -> Result; + async fn cancel_transaction( + &self, + tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result; + async fn get_transaction_status(&self, tx_hash: H256) -> Result; async fn wait_until_mined(&self, tx_hash: H256) -> Result>; @@ -86,7 +112,6 @@ where FS: Signer + 'static, { Raw(RawTransactionSender), - Conditional(ConditionalTransactionSender), Flashbots(FlashbotsTransactionSender), PolygonBloxroute(PolygonBloxrouteTransactionSender), } @@ -97,8 +122,6 @@ where pub enum TransactionSenderKind { /// Raw transaction sender Raw, - /// Conditional transaction sender - Conditional, /// Flashbots transaction sender Flashbots, /// Bloxroute transaction sender @@ -109,15 +132,26 @@ pub enum TransactionSenderKind { #[derive(Debug, Clone)] pub enum TransactionSenderArgs { /// Raw transaction sender - Raw, - /// Conditional transaction sender - Conditional, + Raw(RawSenderArgs), /// Flashbots transaction sender Flashbots(FlashbotsSenderArgs), /// Bloxroute transaction sender Bloxroute(BloxrouteSenderArgs), } +/// Raw sender arguments +#[derive(Debug, Clone)] +pub struct RawSenderArgs { + /// Submit URL + pub submit_url: String, + /// Use submit for status + pub use_submit_for_status: bool, + /// If the "dropped" status is supported by the status provider + pub dropped_status_supported: bool, + /// If the sender should use the conditional endpoint + pub use_conditional_rpc: bool, +} + /// Bloxroute sender arguments #[derive(Debug, Clone)] pub struct BloxrouteSenderArgs { @@ -141,21 +175,37 @@ pub struct FlashbotsSenderArgs { impl TransactionSenderArgs { pub(crate) fn into_sender( self, - client: Arc>, + rpc_provider: Arc>, + submit_provider: Option>>, signer: S, eth_poll_interval: Duration, ) -> std::result::Result, SenderConstructorErrors> { let sender = match self { - Self::Raw => TransactionSenderEnum::Raw(RawTransactionSender::new(client, signer)), - Self::Conditional => TransactionSenderEnum::Conditional( - ConditionalTransactionSender::new(client, signer), - ), + Self::Raw(args) => { + let (provider, submitter) = if let Some(submit_provider) = submit_provider { + if args.use_submit_for_status { + (Arc::clone(&submit_provider), submit_provider) + } else { + (rpc_provider, submit_provider) + } + } else { + (Arc::clone(&rpc_provider), rpc_provider) + }; + + TransactionSenderEnum::Raw(RawTransactionSender::new( + provider, + submitter, + signer, + args.dropped_status_supported, + args.use_conditional_rpc, + )) + } Self::Flashbots(args) => { let flashbots_signer = args.auth_key.parse().context("should parse auth key")?; TransactionSenderEnum::Flashbots(FlashbotsTransactionSender::new( - client, + rpc_provider, signer, flashbots_signer, args.builders, @@ -165,7 +215,7 @@ impl TransactionSenderArgs { } Self::Bloxroute(args) => { 
TransactionSenderEnum::PolygonBloxroute(PolygonBloxrouteTransactionSender::new( - client, + rpc_provider, signer, eth_poll_interval, &args.header, @@ -210,6 +260,22 @@ where Ok((tx.rlp_signed(&signature), nonce)) } +fn create_hard_cancel_tx( + from: Address, + to: Address, + nonce: U256, + gas_fees: GasFees, +) -> TypedTransaction { + Eip1559TransactionRequest::new() + .from(from) + .to(to) + .nonce(nonce) + .max_fee_per_gas(gas_fees.max_fee_per_gas) + .max_priority_fee_per_gas(gas_fees.max_priority_fee_per_gas) + .data(Bytes::new()) + .into() +} + impl From for TxSenderError { fn from(value: ProviderError) -> Self { match &value { @@ -217,6 +283,16 @@ impl From for TxSenderError { if let Some(e) = e.as_error_response() { if e.message.contains("replacement transaction underpriced") { return TxSenderError::ReplacementUnderpriced; + } else if e.message.contains("nonce too low") { + return TxSenderError::NonceTooLow; + // Arbitrum conditional sender error message + // TODO push them to use a specific error code and to return the specific slot that is not met. + } else if e + .message + .to_lowercase() + .contains("storage slot value condition not met") + { + return TxSenderError::ConditionNotMet; } } TxSenderError::Other(value.into()) diff --git a/crates/builder/src/sender/raw.rs b/crates/builder/src/sender/raw.rs index afaacab95..3e23f454f 100644 --- a/crates/builder/src/sender/raw.rs +++ b/crates/builder/src/sender/raw.rs @@ -18,13 +18,17 @@ use async_trait::async_trait; use ethers::{ middleware::SignerMiddleware, providers::{JsonRpcClient, Middleware, PendingTransaction, Provider}, - types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256}, + types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256, U256}, }; use ethers_signers::Signer; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; +use serde_json::json; -use super::Result; -use crate::sender::{fill_and_sign, SentTxInfo, TransactionSender, TxStatus}; +use super::{CancelTxInfo, Result}; +use crate::sender::{ + create_hard_cancel_tx, fill_and_sign, SentTxInfo, TransactionSender, TxStatus, +}; #[derive(Debug)] pub(crate) struct RawTransactionSender @@ -32,10 +36,13 @@ where C: JsonRpcClient + 'static, S: Signer + 'static, { + provider: Arc>, // The `SignerMiddleware` specifically needs to wrap a `Provider`, and not // just any `Middleware`, because `.request()` is only on `Provider` and not // on `Middleware`. - provider: SignerMiddleware>, S>, + submitter: SignerMiddleware>, S>, + dropped_status_supported: bool, + use_conditional_rpc: bool, } #[async_trait] @@ -47,16 +54,49 @@ where async fn send_transaction( &self, tx: TypedTransaction, - _expected_storage: &ExpectedStorage, + expected_storage: &ExpectedStorage, ) -> Result { - let (raw_tx, nonce) = fill_and_sign(&self.provider, tx).await?; + let (raw_tx, nonce) = fill_and_sign(&self.submitter, tx).await?; + + let tx_hash = if self.use_conditional_rpc { + self.submitter + .provider() + .request( + "eth_sendRawTransactionConditional", + (raw_tx, json!({ "knownAccounts": expected_storage })), + ) + .await? + } else { + self.submitter + .provider() + .request("eth_sendRawTransaction", (raw_tx,)) + .await? 
+ }; + + Ok(SentTxInfo { nonce, tx_hash }) + } + + async fn cancel_transaction( + &self, + _tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result { + let tx = create_hard_cancel_tx(self.submitter.address(), to, nonce, gas_fees); + + let (raw_tx, _) = fill_and_sign(&self.submitter, tx).await?; let tx_hash = self - .provider + .submitter .provider() .request("eth_sendRawTransaction", (raw_tx,)) .await?; - Ok(SentTxInfo { nonce, tx_hash }) + + Ok(CancelTxInfo { + tx_hash, + soft_cancelled: false, + }) } async fn get_transaction_status(&self, tx_hash: H256) -> Result { @@ -66,7 +106,13 @@ where .await .context("provider should return transaction status")?; Ok(match tx { - None => TxStatus::Dropped, + None => { + if self.dropped_status_supported { + TxStatus::Dropped + } else { + TxStatus::Pending + } + } Some(tx) => match tx.block_number { None => TxStatus::Pending, Some(block_number) => TxStatus::Mined { @@ -83,7 +129,7 @@ where } fn address(&self) -> Address { - self.provider.address() + self.submitter.address() } } @@ -92,9 +138,18 @@ where C: JsonRpcClient + 'static, S: Signer + 'static, { - pub(crate) fn new(provider: Arc>, signer: S) -> Self { + pub(crate) fn new( + provider: Arc>, + submitter: Arc>, + signer: S, + dropped_status_supported: bool, + use_conditional_rpc: bool, + ) -> Self { Self { - provider: SignerMiddleware::new(provider, signer), + provider, + submitter: SignerMiddleware::new(submitter, signer), + dropped_status_supported, + use_conditional_rpc, } } } diff --git a/crates/builder/src/server/local.rs b/crates/builder/src/server/local.rs index 76bc0cf87..fa6758ab3 100644 --- a/crates/builder/src/server/local.rs +++ b/crates/builder/src/server/local.rs @@ -186,9 +186,6 @@ impl LocalBuilderServerRunner { SendBundleResult::NoOperationsInitially => { Err(anyhow::anyhow!("no ops to send").into()) }, - SendBundleResult::NoOperationsAfterFeeIncreases { .. } => { - Err(anyhow::anyhow!("bundle initially had operations, but after increasing gas fees it was empty").into()) - }, SendBundleResult::StalledAtMaxFeeIncreases => Err(anyhow::anyhow!("stalled at max fee increases").into()), SendBundleResult::Error(e) => Err(anyhow::anyhow!("send bundle error: {e:?}").into()), } diff --git a/crates/builder/src/task.rs b/crates/builder/src/task.rs index 91c41190b..0ed558e36 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -62,8 +62,8 @@ pub struct Args { /// True if using unsafe mode pub unsafe_mode: bool, /// Private key to use for signing transactions - /// If not provided, AWS KMS will be used - pub private_key: Option, + /// If empty, AWS KMS will be used + pub private_keys: Vec, /// AWS KMS key ids to use for signing transactions /// Only used if private_key is not provided pub aws_kms_key_ids: Vec, @@ -77,8 +77,6 @@ pub struct Args { pub max_bundle_size: u64, /// Maximum bundle size in gas limit pub max_bundle_gas: u64, - /// URL to submit bundles too - pub submit_url: String, /// Percentage to add to the network priority fee for the bundle priority fee pub bundle_priority_fee_overhead_percent: u64, /// Priority fee mode to use for operation priority fee minimums @@ -133,6 +131,11 @@ where async fn run(mut self: Box, shutdown_token: CancellationToken) -> anyhow::Result<()> { let provider = rundler_provider::new_provider(&self.args.rpc_url, Some(self.args.eth_poll_interval))?; + let submit_provider = if let TransactionSenderArgs::Raw(args) = &self.args.sender_args { + Some(rundler_provider::new_provider(&args.submit_url, None)?) 
+ } else { + None + }; let ep_v0_6 = EthersEntryPointV0_6::new( self.args.chain_spec.entry_point_address_v0_6, @@ -149,19 +152,32 @@ where let mut sender_handles = vec![]; let mut bundle_sender_actions = vec![]; + let mut pk_iter = self.args.private_keys.clone().into_iter(); for ep in &self.args.entry_points { match ep.version { EntryPointVersion::V0_6 => { let (handles, actions) = self - .create_builders_v0_6(ep, Arc::clone(&provider), ep_v0_6.clone()) + .create_builders_v0_6( + ep, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_6.clone(), + &mut pk_iter, + ) .await?; sender_handles.extend(handles); bundle_sender_actions.extend(actions); } EntryPointVersion::V0_7 => { let (handles, actions) = self - .create_builders_v0_7(ep, Arc::clone(&provider), ep_v0_7.clone()) + .create_builders_v0_7( + ep, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_7.clone(), + &mut pk_iter, + ) .await?; sender_handles.extend(handles); bundle_sender_actions.extend(actions); @@ -242,11 +258,13 @@ where Box::new(self) } - async fn create_builders_v0_6( + async fn create_builders_v0_6( &self, ep: &EntryPointBuilderSettings, provider: Arc>, + submit_provider: Option>>, ep_v0_6: E, + pk_iter: &mut I, ) -> anyhow::Result<( Vec>>, Vec>, @@ -254,6 +272,7 @@ where where C: JsonRpcClient + 'static, E: EntryPointProvider + Clone, + I: Iterator, { info!("Mempool config for ep v0.6: {:?}", ep.mempool_configs); let mut sender_handles = vec![]; @@ -263,18 +282,21 @@ where self.create_bundle_builder( i + ep.bundle_builder_index_offset, Arc::clone(&provider), + submit_provider.clone(), ep_v0_6.clone(), UnsafeSimulator::new( Arc::clone(&provider), ep_v0_6.clone(), self.args.sim_settings, ), + pk_iter, ) .await? } else { self.create_bundle_builder( i + ep.bundle_builder_index_offset, Arc::clone(&provider), + submit_provider.clone(), ep_v0_6.clone(), simulation::new_v0_6_simulator( Arc::clone(&provider), @@ -282,6 +304,7 @@ where self.args.sim_settings, ep.mempool_configs.clone(), ), + pk_iter, ) .await? }; @@ -291,11 +314,13 @@ where Ok((sender_handles, bundle_sender_actions)) } - async fn create_builders_v0_7( + async fn create_builders_v0_7( &self, ep: &EntryPointBuilderSettings, provider: Arc>, + submit_provider: Option>>, ep_v0_7: E, + pk_iter: &mut I, ) -> anyhow::Result<( Vec>>, Vec>, @@ -303,6 +328,7 @@ where where C: JsonRpcClient + 'static, E: EntryPointProvider + Clone, + I: Iterator, { info!("Mempool config for ep v0.7: {:?}", ep.mempool_configs); let mut sender_handles = vec![]; @@ -312,18 +338,21 @@ where self.create_bundle_builder( i + ep.bundle_builder_index_offset, Arc::clone(&provider), + submit_provider.clone(), ep_v0_7.clone(), UnsafeSimulator::new( Arc::clone(&provider), ep_v0_7.clone(), self.args.sim_settings, ), + pk_iter, ) .await? } else { self.create_bundle_builder( i + ep.bundle_builder_index_offset, Arc::clone(&provider), + submit_provider.clone(), ep_v0_7.clone(), simulation::new_v0_7_simulator( Arc::clone(&provider), @@ -331,6 +360,7 @@ where self.args.sim_settings, ep.mempool_configs.clone(), ), + pk_iter, ) .await? 
}; @@ -340,12 +370,14 @@ where Ok((sender_handles, bundle_sender_actions)) } - async fn create_bundle_builder( + async fn create_bundle_builder( &self, index: u64, provider: Arc>, + submit_provider: Option>>, entry_point: E, simulator: S, + pk_iter: &mut I, ) -> anyhow::Result<( JoinHandle>, mpsc::Sender, @@ -356,10 +388,11 @@ where E: EntryPointProvider + Clone, S: Simulator, C: JsonRpcClient + 'static, + I: Iterator, { let (send_bundle_tx, send_bundle_rx) = mpsc::channel(1); - let signer = if let Some(pk) = &self.args.private_key { + let signer = if let Some(pk) = pk_iter.next() { info!("Using local signer"); BundlerSigner::Local( LocalSigner::connect( @@ -403,20 +436,14 @@ where bundle_priority_fee_overhead_percent: self.args.bundle_priority_fee_overhead_percent, }; - let submit_provider = rundler_provider::new_provider( - &self.args.submit_url, - Some(self.args.eth_poll_interval), - )?; - let transaction_sender = self.args.sender_args.clone().into_sender( + Arc::clone(&provider), submit_provider, signer, self.args.eth_poll_interval, )?; let tracker_settings = transaction_tracker::Settings { - poll_interval: self.args.eth_poll_interval, - max_blocks_to_wait_for_mine: self.args.max_blocks_to_wait_for_mine, replacement_fee_percent_increase: self.args.replacement_fee_percent_increase, }; @@ -429,8 +456,8 @@ where .await?; let builder_settings = bundle_sender::Settings { - replacement_fee_percent_increase: self.args.replacement_fee_percent_increase, max_fee_increases: self.args.max_fee_increases, + max_blocks_to_wait_for_mine: self.args.max_blocks_to_wait_for_mine, }; let proposer = BundleProposerImpl::new( diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index 1dbe8f99d..4514951f6 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -11,16 +11,17 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use anyhow::{bail, Context}; use async_trait::async_trait; -use ethers::types::{transaction::eip2718::TypedTransaction, H256, U256}; +use ethers::types::{transaction::eip2718::TypedTransaction, Address, H256, U256}; +#[cfg(test)] +use mockall::automock; use rundler_provider::Provider; use rundler_sim::ExpectedStorage; use rundler_types::GasFees; -use tokio::time; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use crate::sender::{TransactionSender, TxSenderError, TxStatus}; @@ -34,39 +35,60 @@ use crate::sender::{TransactionSender, TxSenderError, TxStatus}; /// succeeded (potentially not the most recent one) or whether circumstances /// have changed so that it is worth making another attempt. #[async_trait] +#[cfg_attr(test, automock)] pub(crate) trait TransactionTracker: Send + Sync + 'static { - fn get_nonce_and_required_fees(&self) -> anyhow::Result<(U256, Option)>; + /// Returns the current nonce and the required fees for the next transaction. + fn get_nonce_and_required_fees(&self) -> TransactionTrackerResult<(U256, Option)>; /// Sends the provided transaction and typically returns its transaction /// hash, but if the transaction failed to send because another transaction /// with the same nonce mined first, then returns information about that /// transaction instead. 
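+ /// Send failures surface as `TransactionTrackerError` values (e.g. `NonceTooLow`,
+ /// `ReplacementUnderpriced`, `ConditionNotMet`) instead of a tracker-side retry, so the
+ /// bundle sender's state machine decides how to react.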
async fn send_transaction( - &self, + &mut self, tx: TypedTransaction, expected_stroage: &ExpectedStorage, - ) -> anyhow::Result; + ) -> TransactionTrackerResult; - /// Waits until one of the following occurs: + /// Cancel the latest transaction in the tracker. /// + /// Returns: An option containing the hash of the transaction that was used to cancel. If the option + /// is empty, then either no transaction was cancelled or the cancellation was a "soft-cancel." + async fn cancel_transaction( + &mut self, + to: Address, + estimated_fees: GasFees, + ) -> TransactionTrackerResult>; + + /// Checks: /// 1. One of our transactions mines (not necessarily the one just sent). /// 2. All our send transactions have dropped. /// 3. Our nonce has changed but none of our transactions mined. This means /// that a transaction from our account other than one of the ones we are /// tracking has mined. This should not normally happen. /// 4. Several new blocks have passed. - async fn wait_for_update(&self) -> anyhow::Result; + async fn check_for_update(&mut self) -> TransactionTrackerResult>; - /// Like `wait_for_update`, except it returns immediately if there is no - /// update rather than waiting for several new blocks. - async fn check_for_update_now(&self) -> anyhow::Result>; + /// Resets the tracker to its initial state + async fn reset(&mut self); } -pub(crate) enum SendResult { - TxHash(H256), - TrackerUpdate(TrackerUpdate), +/// Errors that can occur while using a `TransactionTracker`. +#[derive(Debug, thiserror::Error)] +pub(crate) enum TransactionTrackerError { + #[error("nonce too low")] + NonceTooLow, + #[error("replacement transaction underpriced")] + ReplacementUnderpriced, + #[error("storage slot value condition not met")] + ConditionNotMet, + /// All other errors + #[error(transparent)] + Other(#[from] anyhow::Error), } +pub(crate) type TransactionTrackerResult = std::result::Result; + #[derive(Debug)] #[allow(dead_code)] pub(crate) enum TrackerUpdate { @@ -78,26 +100,16 @@ pub(crate) enum TrackerUpdate { gas_limit: Option, gas_used: Option, }, - StillPendingAfterWait, LatestTxDropped { nonce: U256, }, NonceUsedForOtherTx { nonce: U256, }, - ReplacementUnderpriced, } #[derive(Debug)] -pub(crate) struct TransactionTrackerImpl( - tokio::sync::Mutex>, -) -where - P: Provider, - T: TransactionSender; - -#[derive(Debug)] -struct TransactionTrackerImplInner +pub(crate) struct TransactionTrackerImpl where P: Provider, T: TransactionSender, @@ -114,8 +126,6 @@ where #[derive(Clone, Copy, Debug)] pub(crate) struct Settings { - pub(crate) poll_interval: Duration, - pub(crate) max_blocks_to_wait_for_mine: u64, pub(crate) replacement_fee_percent_increase: u64, } @@ -126,33 +136,6 @@ struct PendingTransaction { attempt_number: u64, } -#[async_trait] -impl TransactionTracker for TransactionTrackerImpl -where - P: Provider, - T: TransactionSender, -{ - fn get_nonce_and_required_fees(&self) -> anyhow::Result<(U256, Option)> { - Ok(self.inner()?.get_nonce_and_required_fees()) - } - - async fn send_transaction( - &self, - tx: TypedTransaction, - expected_storage: &ExpectedStorage, - ) -> anyhow::Result { - self.inner()?.send_transaction(tx, expected_storage).await - } - - async fn wait_for_update(&self) -> anyhow::Result { - self.inner()?.wait_for_update().await - } - - async fn check_for_update_now(&self) -> anyhow::Result> { - self.inner()?.check_for_update_now().await - } -} - impl TransactionTrackerImpl where P: Provider, @@ -163,31 +146,6 @@ where sender: T, settings: Settings, builder_index: u64, - ) -> 
anyhow::Result { - let inner = - TransactionTrackerImplInner::new(provider, sender, settings, builder_index).await?; - Ok(Self(tokio::sync::Mutex::new(inner))) - } - - fn inner( - &self, - ) -> anyhow::Result>> { - self.0 - .try_lock() - .context("tracker should not be called while waiting for a transaction") - } -} - -impl TransactionTrackerImplInner -where - P: Provider, - T: TransactionSender, -{ - async fn new( - provider: Arc
, - sender: T, - settings: Settings, - builder_index: u64, ) -> anyhow::Result { let nonce = provider .get_transaction_count(sender.address()) @@ -205,7 +163,84 @@ where }) } - fn get_nonce_and_required_fees(&self) -> (U256, Option) { + fn set_nonce_and_clear_state(&mut self, nonce: U256) { + self.nonce = nonce; + self.transactions.clear(); + self.has_dropped = false; + self.attempt_count = 0; + self.update_metrics(); + } + + async fn get_external_nonce(&self) -> anyhow::Result { + self.provider + .get_transaction_count(self.sender.address()) + .await + .context("tracker should load current nonce from provider") + } + + fn validate_transaction(&self, tx: &TypedTransaction) -> anyhow::Result<()> { + let Some(&nonce) = tx.nonce() else { + bail!("transaction given to tracker should have nonce set"); + }; + let gas_fees = GasFees::from(tx); + let (required_nonce, required_gas_fees) = self.get_nonce_and_required_fees()?; + if nonce != required_nonce { + bail!("tried to send transaction with nonce {nonce}, but should match tracker's nonce of {required_nonce}"); + } + if let Some(required_gas_fees) = required_gas_fees { + if gas_fees.max_fee_per_gas < required_gas_fees.max_fee_per_gas + || gas_fees.max_priority_fee_per_gas < required_gas_fees.max_priority_fee_per_gas + { + bail!("new transaction's gas fees should be at least the required fees") + } + } + Ok(()) + } + + fn update_metrics(&self) { + TransactionTrackerMetrics::set_num_pending_transactions( + self.builder_index, + self.transactions.len(), + ); + TransactionTrackerMetrics::set_nonce(self.builder_index, self.nonce); + TransactionTrackerMetrics::set_attempt_count(self.builder_index, self.attempt_count); + if let Some(tx) = self.transactions.last() { + TransactionTrackerMetrics::set_current_fees(self.builder_index, Some(tx.gas_fees)); + } else { + TransactionTrackerMetrics::set_current_fees(self.builder_index, None); + } + } + + async fn get_mined_tx_gas_info( + &self, + tx_hash: H256, + ) -> anyhow::Result<(Option, Option)> { + let (tx, tx_receipt) = tokio::try_join!( + self.provider.get_transaction(tx_hash), + self.provider.get_transaction_receipt(tx_hash), + )?; + let gas_limit = tx.map(|t| t.gas).or_else(|| { + warn!("failed to fetch transaction data for tx: {}", tx_hash); + None + }); + let gas_used = match tx_receipt { + Some(r) => r.gas_used, + None => { + warn!("failed to fetch transaction receipt for tx: {}", tx_hash); + None + } + }; + Ok((gas_limit, gas_used)) + } +} + +#[async_trait] +impl TransactionTracker for TransactionTrackerImpl +where + P: Provider, + T: TransactionSender, +{ + fn get_nonce_and_required_fees(&self) -> TransactionTrackerResult<(U256, Option)> { let gas_fees = if self.has_dropped { None } else { @@ -214,24 +249,17 @@ where .increase_by_percent(self.settings.replacement_fee_percent_increase) }) }; - (self.nonce, gas_fees) + Ok((self.nonce, gas_fees)) } async fn send_transaction( &mut self, tx: TypedTransaction, expected_storage: &ExpectedStorage, - ) -> anyhow::Result { + ) -> TransactionTrackerResult { self.validate_transaction(&tx)?; let gas_fees = GasFees::from(&tx); - let send_result = self.sender.send_transaction(tx, expected_storage).await; - let sent_tx = match send_result { - Ok(sent_tx) => sent_tx, - Err(error) => { - let tracker_update = self.handle_send_error(error).await?; - return Ok(SendResult::TrackerUpdate(tracker_update)); - } - }; + let sent_tx = self.sender.send_transaction(tx, expected_storage).await?; info!( "Sent transaction {:?} nonce: {:?}", sent_tx.tx_hash, sent_tx.nonce @@ 
-244,61 +272,66 @@ where self.has_dropped = false; self.attempt_count += 1; self.update_metrics(); - Ok(SendResult::TxHash(sent_tx.tx_hash)) + Ok(sent_tx.tx_hash) } - /// When we fail to send a transaction, it may be because another - /// transaction has mined before it could be sent, invalidating the nonce. - /// Thus, do one last check for an update before returning the error. - async fn handle_send_error(&mut self, error: TxSenderError) -> anyhow::Result { - match &error { - TxSenderError::ReplacementUnderpriced => { - return Ok(TrackerUpdate::ReplacementUnderpriced) + async fn cancel_transaction( + &mut self, + to: Address, + estimated_fees: GasFees, + ) -> TransactionTrackerResult> { + let (tx_hash, gas_fees) = match self.transactions.last() { + Some(tx) => { + let increased_fees = tx + .gas_fees + .increase_by_percent(self.settings.replacement_fee_percent_increase); + let gas_fees = GasFees { + max_fee_per_gas: increased_fees + .max_fee_per_gas + .max(estimated_fees.max_fee_per_gas), + max_priority_fee_per_gas: increased_fees + .max_priority_fee_per_gas + .max(estimated_fees.max_priority_fee_per_gas), + }; + (tx.tx_hash, gas_fees) } - TxSenderError::Other(_error) => {} - } - - let update = self.check_for_update_now().await?; - let Some(update) = update else { - return Err(error.into()); + None => (H256::zero(), estimated_fees), }; - match &update { - TrackerUpdate::StillPendingAfterWait | TrackerUpdate::LatestTxDropped { .. } => { - Err(error.into()) - } - _ => Ok(update), - } - } - async fn wait_for_update(&mut self) -> anyhow::Result { - let start_block_number = self - .provider - .get_block_number() - .await - .context("tracker should get starting block when waiting for update")?; - let end_block_number = start_block_number + self.settings.max_blocks_to_wait_for_mine; - loop { - let update = self.check_for_update_now().await?; - if let Some(update) = update { - return Ok(update); - } - let current_block_number = self - .provider - .get_block_number() - .await - .context("tracker should get current block when polling for updates")?; - if end_block_number <= current_block_number { - return Ok(TrackerUpdate::StillPendingAfterWait); - } - time::sleep(self.settings.poll_interval).await; + let cancel_info = self + .sender + .cancel_transaction(tx_hash, self.nonce, to, gas_fees) + .await?; + + if cancel_info.soft_cancelled { + // If the transaction was soft-cancelled. Reset internal state. + self.reset().await; + return Ok(None); } + + info!("Sent cancellation tx {:?}", cancel_info.tx_hash); + + self.transactions.push(PendingTransaction { + tx_hash: cancel_info.tx_hash, + gas_fees, + attempt_number: self.attempt_count, + }); + + self.has_dropped = false; + self.attempt_count += 1; + self.update_metrics(); + Ok(Some(cancel_info.tx_hash)) } - async fn check_for_update_now(&mut self) -> anyhow::Result> { + async fn check_for_update(&mut self) -> TransactionTrackerResult> { let external_nonce = self.get_external_nonce().await?; if self.nonce < external_nonce { // The nonce has changed. Check to see which of our transactions has // mined, if any. 
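+ // Scan our tracked transactions to find which one mined; if none did, fall
+ // through to reporting that the nonce was consumed by an unrelated transaction.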
+ debug!( + "Nonce has changed from {:?} to {:?}", + self.nonce, external_nonce + ); let mut out = TrackerUpdate::NonceUsedForOtherTx { nonce: self.nonce }; for tx in self.transactions.iter().rev() { @@ -307,6 +340,7 @@ where .get_transaction_status(tx.tx_hash) .await .context("tracker should check transaction status when the nonce changes")?; + info!("Status of tx {:?}: {:?}", tx.tx_hash, status); if let TxStatus::Mined { block_number } = status { let (gas_limit, gas_used) = self.get_mined_tx_gas_info(tx.tx_hash).await?; out = TrackerUpdate::Mined { @@ -340,7 +374,7 @@ where .await .context("tracker should check for dropped transactions")?; Ok(match status { - TxStatus::Pending | TxStatus::Dropped => None, + TxStatus::Pending => None, TxStatus::Mined { block_number } => { let nonce = self.nonce; self.set_nonce_and_clear_state(nonce + 1); @@ -353,82 +387,33 @@ where gas_limit, gas_used, }) - } // TODO(#295): dropped status is often incorrect, for now just assume its still pending - // TxStatus::Dropped => { - // self.has_dropped = true; - // Some(TrackerUpdate::LatestTxDropped { nonce: self.nonce }) - // } + } + TxStatus::Dropped => { + self.has_dropped = true; + Some(TrackerUpdate::LatestTxDropped { nonce: self.nonce }) + } }) } - fn set_nonce_and_clear_state(&mut self, nonce: U256) { - self.nonce = nonce; - self.transactions.clear(); - self.has_dropped = false; - self.attempt_count = 0; - self.update_metrics(); - } - - async fn get_external_nonce(&self) -> anyhow::Result { - self.provider - .get_transaction_count(self.sender.address()) - .await - .context("tracker should load current nonce from provider") + async fn reset(&mut self) { + let nonce = self.get_external_nonce().await.unwrap_or(self.nonce); + self.set_nonce_and_clear_state(nonce); } +} - fn validate_transaction(&self, tx: &TypedTransaction) -> anyhow::Result<()> { - let Some(&nonce) = tx.nonce() else { - bail!("transaction given to tracker should have nonce set"); - }; - let gas_fees = GasFees::from(tx); - let (required_nonce, required_gas_fees) = self.get_nonce_and_required_fees(); - if nonce != required_nonce { - bail!("tried to send transaction with nonce {nonce}, but should match tracker's nonce of {required_nonce}"); - } - if let Some(required_gas_fees) = required_gas_fees { - if gas_fees.max_fee_per_gas < required_gas_fees.max_fee_per_gas - || gas_fees.max_priority_fee_per_gas < required_gas_fees.max_priority_fee_per_gas - { - bail!("new transaction's gas fees should be at least the required fees") +impl From for TransactionTrackerError { + fn from(value: TxSenderError) -> Self { + match value { + TxSenderError::NonceTooLow => TransactionTrackerError::NonceTooLow, + TxSenderError::ReplacementUnderpriced => { + TransactionTrackerError::ReplacementUnderpriced } - } - Ok(()) - } - - fn update_metrics(&self) { - TransactionTrackerMetrics::set_num_pending_transactions( - self.builder_index, - self.transactions.len(), - ); - TransactionTrackerMetrics::set_nonce(self.builder_index, self.nonce); - TransactionTrackerMetrics::set_attempt_count(self.builder_index, self.attempt_count); - if let Some(tx) = self.transactions.last() { - TransactionTrackerMetrics::set_current_fees(self.builder_index, Some(tx.gas_fees)); - } else { - TransactionTrackerMetrics::set_current_fees(self.builder_index, None); - } - } - - async fn get_mined_tx_gas_info( - &self, - tx_hash: H256, - ) -> anyhow::Result<(Option, Option)> { - let (tx, tx_receipt) = tokio::try_join!( - self.provider.get_transaction(tx_hash), - 
self.provider.get_transaction_receipt(tx_hash), - )?; - let gas_limit = tx.map(|t| t.gas).or_else(|| { - warn!("failed to fetch transaction data for tx: {}", tx_hash); - None - }); - let gas_used = match tx_receipt { - Some(r) => r.gas_used, - None => { - warn!("failed to fetch transaction receipt for tx: {}", tx_hash); - None + TxSenderError::ConditionNotMet => TransactionTrackerError::ConditionNotMet, + TxSenderError::SoftCancelFailed => { + TransactionTrackerError::Other(anyhow::anyhow!("soft cancel failed")) } - }; - Ok((gas_limit, gas_used)) + TxSenderError::Other(e) => TransactionTrackerError::Other(e), + } } } @@ -482,8 +467,6 @@ mod tests { provider: MockProvider, ) -> TransactionTrackerImpl { let settings = Settings { - poll_interval: Duration::from_secs(0), - max_blocks_to_wait_for_mine: 3, replacement_fee_percent_increase: 5, }; @@ -512,7 +495,7 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(0))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new() .nonce(0) @@ -536,50 +519,44 @@ mod tests { ); } - // TODO(#295): fix dropped status - // #[tokio::test] - // async fn test_nonce_and_fees_dropped() { - // let (mut sender, mut provider) = create_base_config(); - // sender.expect_address().return_const(Address::zero()); - - // sender - // .expect_get_transaction_status() - // .returning(move |_a| Box::pin(async { Ok(TxStatus::Dropped) })); + #[tokio::test] + async fn test_nonce_and_fees_dropped() { + let (mut sender, mut provider) = create_base_config(); + sender.expect_address().return_const(Address::zero()); - // sender.expect_send_transaction().returning(move |_a, _b| { - // Box::pin(async { - // Ok(SentTxInfo { - // nonce: U256::from(0), - // tx_hash: H256::zero(), - // }) - // }) - // }); + sender + .expect_get_transaction_status() + .returning(move |_a| Box::pin(async { Ok(TxStatus::Dropped) })); - // provider - // .expect_get_transaction_count() - // .returning(move |_a| Ok(U256::from(0))); + sender.expect_send_transaction().returning(move |_a, _b| { + Box::pin(async { + Ok(SentTxInfo { + nonce: U256::from(0), + tx_hash: H256::zero(), + }) + }) + }); - // provider - // .expect_get_block_number() - // .returning(move || Ok(1)) - // .times(1); + provider + .expect_get_transaction_count() + .returning(move |_a| Ok(U256::from(0))); - // let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; - // let tx = Eip1559TransactionRequest::new() - // .nonce(0) - // .gas(10000) - // .max_fee_per_gas(10000); - // let exp = ExpectedStorage::default(); + let tx = Eip1559TransactionRequest::new() + .nonce(0) + .gas(10000) + .max_fee_per_gas(10000); + let exp = ExpectedStorage::default(); - // // send dummy transaction - // let _sent = tracker.send_transaction(tx.into(), &exp).await; - // let _tracker_update = tracker.wait_for_update().await.unwrap(); + // send dummy transaction + let _sent = tracker.send_transaction(tx.into(), &exp).await; + let _tracker_update = tracker.check_for_update().await.unwrap(); - // let nonce_and_fees = tracker.get_nonce_and_required_fees().unwrap(); + let nonce_and_fees = tracker.get_nonce_and_required_fees().unwrap(); - // assert_eq!((U256::from(0), None), nonce_and_fees); - // } + assert_eq!((U256::from(0), None), nonce_and_fees); + } #[tokio::test] async fn test_send_transaction_without_nonce() { @@ -598,7 +575,7 @@ mod tests { .expect_get_transaction_count() 
.returning(move |_a| Ok(U256::from(2)));

- let tracker = create_tracker(sender, provider).await;
+ let mut tracker = create_tracker(sender, provider).await;

let tx = Eip1559TransactionRequest::new();
let exp = ExpectedStorage::default();
@@ -625,7 +602,7 @@ mod tests {
.expect_get_transaction_count()
.returning(move |_a| Ok(U256::from(2)));

- let tracker = create_tracker(sender, provider).await;
+ let mut tracker = create_tracker(sender, provider).await;

let tx = Eip1559TransactionRequest::new().nonce(0);
let exp = ExpectedStorage::default();
@@ -651,41 +628,11 @@ mod tests {
.expect_get_transaction_count()
.returning(move |_a| Ok(U256::from(0)));

- let tracker = create_tracker(sender, provider).await;
+ let mut tracker = create_tracker(sender, provider).await;

let tx = Eip1559TransactionRequest::new().nonce(0);
let exp = ExpectedStorage::default();

- let sent_transaction = tracker.send_transaction(tx.into(), &exp).await.unwrap();
-
- assert!(matches!(sent_transaction, SendResult::TxHash(..)));
- }
-
- #[tokio::test]
- async fn test_wait_for_update_still_pending() {
- let (mut sender, mut provider) = create_base_config();
- sender.expect_address().return_const(Address::zero());
-
- let mut s = Sequence::new();
-
- provider
- .expect_get_transaction_count()
- .returning(move |_a| Ok(U256::from(0)));
-
- for block_number in 1..=4 {
- provider
- .expect_get_block_number()
- .returning(move || Ok(block_number))
- .times(1)
- .in_sequence(&mut s);
- }
-
- let tracker = create_tracker(sender, provider).await;
- let tracker_update = tracker.wait_for_update().await.unwrap();
-
- assert!(matches!(
- tracker_update,
- TrackerUpdate::StillPendingAfterWait
- ));
+ tracker.send_transaction(tx.into(), &exp).await.unwrap();
}

// TODO(#295): fix dropped status
@@ -727,7 +674,7 @@ mod tests {
// }

#[tokio::test]
- async fn test_wait_for_update_nonce_used() {
+ async fn test_check_for_update_nonce_used() {
let (mut sender, mut provider) = create_base_config();
sender.expect_address().return_const(Address::zero());
@@ -740,14 +687,9 @@ mod tests {
.in_sequence(&mut provider_seq);
}

- provider
- .expect_get_block_number()
- .returning(move || Ok(1))
- .times(1);
-
- let tracker = create_tracker(sender, provider).await;
+ let mut tracker = create_tracker(sender, provider).await;

- let tracker_update = tracker.wait_for_update().await.unwrap();
+ let tracker_update = tracker.check_for_update().await.unwrap().unwrap();

assert!(matches!(
tracker_update,
@@ -756,7 +698,7 @@ mod tests {
}

#[tokio::test]
- async fn test_wait_for_update_mined() {
+ async fn test_check_for_update_mined() {
let (mut sender, mut provider) = create_base_config();
sender.expect_address().return_const(Address::zero());
sender
@@ -776,11 +718,6 @@ mod tests {
.expect_get_transaction_count()
.returning(move |_a| Ok(U256::from(0)));

- provider
- .expect_get_block_number()
- .returning(move || Ok(1))
- .times(1);
-
provider.expect_get_transaction().returning(|_: H256| {
Ok(Some(Transaction {
gas: U256::from(0),
gas_price: Some(U256::from(0)),
max_fee_per_gas: Some(U256::from(0)),
max_priority_fee_per_gas: Some(U256::from(0)),
..Default::default()
}))
});

- let tracker = create_tracker(sender, provider).await;
+ let mut tracker = create_tracker(sender, provider).await;

let tx = Eip1559TransactionRequest::new().nonce(0);
let exp = ExpectedStorage::default();

// send dummy transaction
let _sent = tracker.send_transaction(tx.into(), &exp).await;

- let tracker_update = tracker.wait_for_update().await.unwrap();
+ let tracker_update = tracker.check_for_update().await.unwrap().unwrap();

assert!(matches!(tracker_update, TrackerUpdate::Mined { .. }));
}
diff --git a/crates/provider/src/ethers/provider.rs b/crates/provider/src/ethers/provider.rs
index 739d7ad3f..baa757249 100644
--- a/crates/provider/src/ethers/provider.rs
+++ b/crates/provider/src/ethers/provider.rs
@@ -29,8 +29,9 @@ use ethers::{
},
};
use reqwest::Url;
-use rundler_types::contracts::utils::get_gas_used::{
- GasUsedResult, GetGasUsed, GETGASUSED_DEPLOYED_BYTECODE,
+use rundler_types::contracts::utils::{
+ get_gas_used::{GasUsedResult, GetGasUsed, GETGASUSED_DEPLOYED_BYTECODE},
+ storage_loader::STORAGELOADER_DEPLOYED_BYTECODE,
};
use serde::{de::DeserializeOwned, Serialize};
@@ -207,6 +208,47 @@ impl Provider for EthersProvider {
.await
.context("should get gas used")?)
}
+
+ async fn batch_get_storage_at(
+ &self,
+ address: Address,
+ slots: Vec<H256>,
+ ) -> ProviderResult<Vec<H256>> {
+ let mut state_overrides = spoof::State::default();
+ state_overrides
+ .account(address)
+ .code(STORAGELOADER_DEPLOYED_BYTECODE.clone());
+
+ let expected_ret_size = slots.len() * 32;
+ let slot_data = slots
+ .into_iter()
+ .flat_map(|slot| slot.to_fixed_bytes())
+ .collect::<Vec<u8>>();
+
+ let tx: TypedTransaction = Eip1559TransactionRequest {
+ to: Some(address.into()),
+ data: Some(slot_data.into()),
+ ..Default::default()
+ }
+ .into();
+
+ let result_bytes = self
+ .call_raw(&tx)
+ .state(&state_overrides)
+ .await
+ .context("should call storage loader")?;
+
+ if result_bytes.len() != expected_ret_size {
+ return Err(anyhow::anyhow!(
+ "expected {} bytes, got {}",
+ expected_ret_size,
+ result_bytes.len()
+ )
+ .into());
+ }
+
+ Ok(result_bytes.chunks(32).map(H256::from_slice).collect())
+ }
}

impl From for ProviderError {
diff --git a/crates/provider/src/traits/provider.rs b/crates/provider/src/traits/provider.rs
index 86a313c8e..0aac930b1 100644
--- a/crates/provider/src/traits/provider.rs
+++ b/crates/provider/src/traits/provider.rs
@@ -137,4 +137,11 @@ pub trait Provider: Send + Sync + Debug + 'static {
data: Bytes,
state_overrides: spoof::State,
) -> ProviderResult;
+
+ /// Get the storage values at a given address and slots
+ async fn batch_get_storage_at(
+ &self,
+ address: Address,
+ slots: Vec<H256>,
+ ) -> ProviderResult<Vec<H256>>;
}
diff --git a/crates/types/build.rs b/crates/types/build.rs
index 6bbb22fa4..f69d11246 100644
--- a/crates/types/build.rs
+++ b/crates/types/build.rs
@@ -88,6 +88,7 @@ fn generate_utils_bindings() -> Result<(), Box> {
MultiAbigen::from_abigens([
abigen_of("utils", "GetCodeHashes")?,
abigen_of("utils", "GetGasUsed")?,
+ abigen_of("utils", "StorageLoader")?,
])
.build()?
.write_to_module("src/contracts/utils", false)?;
diff --git a/crates/types/contracts/foundry.toml b/crates/types/contracts/foundry.toml
index 0705faad6..ea3c24180 100644
--- a/crates/types/contracts/foundry.toml
+++ b/crates/types/contracts/foundry.toml
@@ -4,7 +4,7 @@ out = 'out'
libs = ['lib']
test = 'test'
cache_path = 'cache'
-solc_version = '0.8.23'
+solc_version = '0.8.26'

remappings = [
'forge-std/=lib/forge-std/src',
diff --git a/crates/types/contracts/src/utils/StorageLoader.sol b/crates/types/contracts/src/utils/StorageLoader.sol
new file mode 100644
index 000000000..1cdab1d1f
--- /dev/null
+++ b/crates/types/contracts/src/utils/StorageLoader.sol
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: UNLICENSED
+pragma solidity ^0.8.25;
+
+contract StorageLoader {
+ fallback() external payable {
+ assembly {
+ let cursor := 0
+
+ for {} lt(cursor, calldatasize()) {cursor := add(cursor, 0x20)} {
+ let slot := calldataload(cursor)
+ mstore(cursor, sload(slot))
+ }
+
+ return(0, cursor)
+ }
+ }
+}
diff --git a/docs/cli.md b/docs/cli.md
index 6249d3b02..9936c69b5 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -166,10 +166,13 @@ List of command line options for configuring the Builder.
- *Only required when running in distributed mode*
- `--builder.private_key`: Private key to use for signing transactions
- env: *BUILDER_PRIVATE_KEY*
- - *Always used if provided. If not provided builder.aws_kms_key_ids is used*
+ - **DEPRECATED**: Use `--builder.private_keys` instead. If both are used, this key is appended to the list.
+- `--builder.private_keys`: Private keys to use for signing transactions, separated by `,`
+ - env: *BUILDER_PRIVATE_KEYS*
- `--builder.aws_kms_key_ids`: AWS KMS key IDs to use for signing transactions (comma-separated)
- env: *BUILDER_AWS_KMS_KEY_IDS*
- *Only required if BUILDER_PRIVATE_KEY is not provided*
+ - *Cannot use `builder.private_keys` and `builder.aws_kms_key_ids` at the same time*
- `--builder.redis_uri`: Redis URI to use for KMS leasing (default: `""`)
- env: *BUILDER_REDIS_URI*
- *Only required when AWS_KMS_KEY_IDS are provided*
@@ -178,22 +181,28 @@ List of command line options for configuring the Builder.
- *Only required when AWS_KMS_KEY_IDS are provided*
- `--builder.max_bundle_size`: Maximum number of ops to include in one bundle (default: `128`)
- env: *BUILDER_MAX_BUNDLE_SIZE*
-- `--builder.submit_url`: If present, the URL of the ETH provider that will be used to send transactions. Defaults to the value of `node_http`.
- - env: *BUILDER_SUBMIT_URL*
-- `--builder.sender`: Choice of what sender type to use for transaction submission. (default: `raw`, options: `raw`, `conditional`, `flashbots`, `polygon_bloxroute`)
- - env: *BUILDER_SENDER*
- `--builder.max_blocks_to_wait_for_mine`: After submitting a bundle transaction, the maximum number of blocks to wait for that transaction to mine before trying to resend with higher gas fees (default: `2`)
- env: *BUILDER_MAX_BLOCKS_TO_WAIT_FOR_MINE*
- `--builder.replacement_fee_percent_increase`: Percentage amount to increase gas fees when retrying a transaction after it failed to mine (default: `10`)
- env: *BUILDER_REPLACEMENT_FEE_PERCENT_INCREASE*
- `--builder.max_fee_increases`: Maximum number of fee increases to attempt (Seven increases of 10% is roughly 2x the initial fees) (default: `7`)
- env: *BUILDER_MAX_FEE_INCREASES*
-- `--builder.flashbots_relay_builders`: additional builders to send bundles to through the Flashbots relay RPC (comma-separated). List of builders that the Flashbots RPC supports can be found [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#eth_sendprivatetransaction). (default: `flashbots`)
-- `--builder.flashbots_relay_auth_key`: authorization key to use with the flashbots relay. See [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#authentication) for more info. (default: None)
+- `--builder.sender`: Choice of what sender type to use for transaction submission. (default: `raw`, options: `raw`, `flashbots`, `polygon_bloxroute`)
+ - env: *BUILDER_SENDER*
+- `--builder.submit_url`: Only used if builder.sender == "raw". If present, the URL of the ETH provider that will be used to send transactions. Defaults to the value of `node_http`.
+ - env: *BUILDER_SUBMIT_URL*
+- `--builder.use_submit_for_status`: Only used if builder.sender == "raw". Use the submit URL to get the status of the bundle transaction. (default: `false`)
+ - env: *BUILDER_USE_SUBMIT_FOR_STATUS*
+- `--builder.use_conditional_rpc`: Only used if builder.sender == "raw". Use `eth_sendRawTransactionConditional` when submitting. (default: `false`)
+ - env: *BUILDER_USE_CONDITIONAL_RPC*
+- `--builder.dropped_status_unsupported`: Only used if builder.sender == "raw". If set, the builder will not process a dropped status. Use this if the URL being used for status (node_http or submit_url) does not support pending transactions, only those that are mined. (default: `false`)
+ - env: *BUILDER_DROPPED_STATUS_UNSUPPORTED*
+- `--builder.flashbots_relay_builders`: Only used if builder.sender == "flashbots". Additional builders to send bundles to through the Flashbots relay RPC (comma-separated). List of builders that the Flashbots RPC supports can be found [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#eth_sendprivatetransaction). (default: `flashbots`)
- env: *BUILDER_FLASHBOTS_RELAY_BUILDERS*
-- `--builder.bloxroute_auth_header`: If using the bloxroute transaction sender on Polygon, this is the auth header to supply with the requests. (default: None)
+- `--builder.flashbots_relay_auth_key`: Only used/required if builder.sender == "flashbots". Authorization key to use with the flashbots relay. See [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#authentication) for more info. (default: None)
+ - env: *BUILDER_FLASHBOTS_RELAY_AUTH_KEY*
+- `--builder.bloxroute_auth_header`: Only used/required if builder.sender == "polygon_bloxroute". If using the bloxroute transaction sender on Polygon, this is the auth header to supply with the requests. (default: None)
- env: `BUILDER_BLOXROUTE_AUTH_HEADER`
- - *Only required when `--builder.sender=polygon_bloxroute`*
- `--builder.index_offset`: If running multiple builder processes, this is the index offset to assign unique indexes to each bundle sender. (default: 0)
- env: `BUILDER_INDEX_OFFSET`
- `--builder.pool_url`: If running in distributed mode, the URL of the pool server to use.
@@ -214,11 +223,11 @@ Here are some example commands to use the CLI:
```sh
# Run the Node subcommand with custom options
-$ ./rundler node --entry_points 0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789 --chain_id 1337 --max_verification_gas 10000000
+$ ./rundler node --chain_id 1337 --max_verification_gas 10000000 --disable_entry_point_v0_6

# Run the RPC subcommand with custom options and enable JSON logging. The builder and pool will need to be running before this starts.
-$ ./rundler rpc --node_http http://localhost:8545 --log.json
+$ ./rundler rpc --node_http http://localhost:8545 --log.json --disable_entry_point_v0_6

# Run the Pool subcommand with custom options and specify a mempool config file
-$ ./rundler pool --max_simulate_handle_ops_gas 15000000 --mempool_config_path mempool.json --node_http http://localhost:8545 --chain_id 8453
+$ ./rundler pool --max_simulate_handle_ops_gas 15000000 --mempool_config_path mempool.json --node_http http://localhost:8545 --chain_id 8453 --disable_entry_point_v0_6
```
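
The `StorageLoader` override used by `batch_get_storage_at` reads any number of storage slots in a single `eth_call`: the caller packs the slot keys as consecutive 32-byte calldata words, the spoofed bytecode `sload`s each key, and the call returns one 32-byte value per slot in the same order. Below is a minimal sketch of that packing and decoding using `ethers` types; the helper names and the zeroed placeholder return data are illustrative only and are not part of the crates above.

```rust
// Sketch of the calldata / return-data layout behind batch_get_storage_at and
// StorageLoader. Only the 32-byte-word layout mirrors the implementation; the
// helper functions here are hypothetical.
use ethers::types::H256;

/// Pack slot keys as consecutive 32-byte calldata words.
fn pack_slots(slots: &[H256]) -> Vec<u8> {
    slots.iter().flat_map(|slot| slot.to_fixed_bytes()).collect()
}

/// Decode the loader's return data: one 32-byte word per requested slot.
fn unpack_values(ret: &[u8]) -> Vec<H256> {
    ret.chunks(32).map(H256::from_slice).collect()
}

fn main() {
    // Example: slots 0 and 1 of some hypothetical contract.
    let slots = vec![H256::from_low_u64_be(0), H256::from_low_u64_be(1)];

    let calldata = pack_slots(&slots);
    assert_eq!(calldata.len(), slots.len() * 32);

    // A node running the StorageLoader fallback against this calldata returns
    // exactly `slots.len() * 32` bytes; zeroed bytes stand in for real values.
    let returned = vec![0u8; calldata.len()];
    let values = unpack_values(&returned);
    assert_eq!(values.len(), slots.len());
}
```

Spoofing the target account's code via `spoof::State` keeps all of the reads consistent within a single call and avoids one `eth_getStorageAt` round trip per slot.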
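
The tracker tests above now drive state through a non-blocking `check_for_update` rather than the removed `wait_for_update`, and the `poll_interval` and `max_blocks_to_wait_for_mine` settings are gone with it. A rough sketch of the caller-driven polling shape this implies follows; the `Tracker` trait, `Update` enum, and fixed sleep interval are assumptions for illustration and not the `rundler_builder` API.

```rust
// Illustrative only: a caller-owned polling loop around a non-blocking
// `check_for_update`. Requires the `anyhow` crate; the trait and enum below
// are stand-ins, not the crate's actual types.
use std::{thread, time::Duration};

#[derive(Debug)]
#[allow(dead_code)]
enum Update {
    Mined { block_number: u64 },
    NonceUsedForOtherTx,
    Dropped,
}

trait Tracker {
    /// Returns `Ok(Some(update))` once the tracked transaction has resolved,
    /// and `Ok(None)` while it is still pending.
    fn check_for_update(&mut self) -> anyhow::Result<Option<Update>>;
}

/// The caller owns the cadence: poll on a timer (or on new blocks) until an
/// update is available, instead of blocking inside the tracker itself.
fn drive(tracker: &mut dyn Tracker, poll_interval: Duration) -> anyhow::Result<Update> {
    loop {
        if let Some(update) = tracker.check_for_update()? {
            return Ok(update);
        }
        thread::sleep(poll_interval);
    }
}

// Dummy tracker so the sketch runs end to end.
struct AlwaysMined;

impl Tracker for AlwaysMined {
    fn check_for_update(&mut self) -> anyhow::Result<Option<Update>> {
        Ok(Some(Update::Mined { block_number: 1 }))
    }
}

fn main() -> anyhow::Result<()> {
    let update = drive(&mut AlwaysMined, Duration::from_millis(250))?;
    println!("tracker resolved: {update:?}");
    Ok(())
}
```

Moving the wait out of the tracker lets the caller decide when to poll, for example once per new block instead of on a fixed timer.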