From 2ec342e9da05ba2247d6168bb591dc77df647ee5 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Fri, 14 Jun 2024 16:18:06 -0500 Subject: [PATCH 01/17] fix(pool): fix race condition in paymaster tracking --- crates/pool/src/mempool/paymaster.rs | 136 +++++++----------- crates/pool/src/mempool/uo_pool.rs | 199 ++++++++++++++++++++------- crates/rpc/src/eth/error.rs | 3 +- 3 files changed, 197 insertions(+), 141 deletions(-) diff --git a/crates/pool/src/mempool/paymaster.rs b/crates/pool/src/mempool/paymaster.rs index 92baf007d..af2a8a483 100644 --- a/crates/pool/src/mempool/paymaster.rs +++ b/crates/pool/src/mempool/paymaster.rs @@ -26,7 +26,7 @@ use rundler_types::{ use rundler_utils::cache::LruMap; use super::MempoolResult; -use crate::chain::{BalanceUpdate, MinedOp}; +use crate::chain::MinedOp; /// Keeps track of current and pending paymaster balances #[derive(Debug)] @@ -94,28 +94,6 @@ where Ok(stake_status) } - pub(crate) fn update_paymaster_balances_after_update<'a>( - &self, - entity_balance_updates: impl Iterator, - unmined_entity_balance_updates: impl Iterator, - ) { - for balance_update in entity_balance_updates { - self.state.write().update_paymaster_balance_from_event( - balance_update.address, - balance_update.amount, - balance_update.is_addition, - ) - } - - for unmined_balance_update in unmined_entity_balance_updates { - self.state.write().update_paymaster_balance_from_event( - unmined_balance_update.address, - unmined_balance_update.amount, - !unmined_balance_update.is_addition, - ) - } - } - pub(crate) async fn paymaster_balance( &self, paymaster: Address, @@ -174,7 +152,20 @@ where self.state.write().set_tracking(tracking_enabled); } - pub(crate) async fn reset_confimed_balances(&self) -> MempoolResult<()> { + pub(crate) async fn reset_confirmed_balances_for( + &self, + addresses: &[Address], + ) -> MempoolResult<()> { + let balances = self.entry_point.get_balances(addresses.to_vec()).await?; + + self.state + .write() + .set_confimed_balances(addresses, &balances); + + Ok(()) + } + + pub(crate) async fn reset_confirmed_balances(&self) -> MempoolResult<()> { let paymaster_addresses = self.paymaster_addresses(); let balances = self @@ -194,6 +185,7 @@ where .write() .update_paymaster_balance_from_mined_op(mined_op); } + pub(crate) fn remove_operation(&self, id: &UserOperationId) { self.state.write().remove_operation(id); } @@ -324,21 +316,6 @@ impl PaymasterTrackerInner { keys } - fn update_paymaster_balance_from_event( - &mut self, - paymaster: Address, - amount: U256, - should_add: bool, - ) { - if let Some(paymaster_balance) = self.paymaster_balances.get(&paymaster) { - if should_add { - paymaster_balance.confirmed = paymaster_balance.confirmed.saturating_add(amount); - } else { - paymaster_balance.confirmed = paymaster_balance.confirmed.saturating_sub(amount); - } - } - } - fn paymaster_metadata(&self, paymaster: Address) -> Option { if let Some(paymaster_balance) = self.paymaster_balances.peek(&paymaster) { return Some(PaymasterMetadata { @@ -530,7 +507,7 @@ mod tests { }; use super::*; - use crate::{chain::BalanceUpdate, mempool::paymaster::PaymasterTracker}; + use crate::mempool::paymaster::PaymasterTracker; fn demo_pool_op(uo: UserOperation) -> PoolOperation { PoolOperation { @@ -609,55 +586,6 @@ mod tests { assert!(res.is_err()); } - #[tokio::test] - async fn test_update_balance() { - let paymaster_tracker = new_paymaster_tracker(); - - let paymaster = Address::random(); - let pending_op_cost = U256::from(100); - let confirmed_balance = U256::from(1000); - - 
paymaster_tracker.add_new_paymaster(paymaster, confirmed_balance, pending_op_cost); - - let deposit = BalanceUpdate { - address: paymaster, - entrypoint: Address::random(), - amount: 100.into(), - is_addition: true, - }; - - // deposit - paymaster_tracker - .update_paymaster_balances_after_update(vec![&deposit].into_iter(), vec![].into_iter()); - - let balance = paymaster_tracker - .paymaster_balance(paymaster) - .await - .unwrap(); - - assert_eq!(balance.confirmed_balance, 1100.into()); - - let withdrawal = BalanceUpdate { - address: paymaster, - entrypoint: Address::random(), - amount: 50.into(), - is_addition: false, - }; - - // withdrawal - paymaster_tracker.update_paymaster_balances_after_update( - vec![&withdrawal].into_iter(), - vec![].into_iter(), - ); - - let balance = paymaster_tracker - .paymaster_balance(paymaster) - .await - .unwrap(); - - assert_eq!(balance.confirmed_balance, 1050.into()); - } - #[tokio::test] async fn new_uo_not_enough_balance_tracking_disabled() { let paymaster_tracker = new_paymaster_tracker(); @@ -731,7 +659,35 @@ mod tests { assert_eq!(balance_0.confirmed_balance, 1000.into()); - let _ = paymaster_tracker.reset_confimed_balances().await; + let _ = paymaster_tracker.reset_confirmed_balances().await; + + let balance_0 = paymaster_tracker + .paymaster_balance(paymaster_0) + .await + .unwrap(); + + assert_eq!(balance_0.confirmed_balance, 50.into()); + } + + #[tokio::test] + async fn test_reset_balances_for() { + let paymaster_tracker = new_paymaster_tracker(); + + let paymaster_0 = Address::random(); + let paymaster_0_confimed = 1000.into(); + + paymaster_tracker.add_new_paymaster(paymaster_0, paymaster_0_confimed, 0.into()); + + let balance_0 = paymaster_tracker + .paymaster_balance(paymaster_0) + .await + .unwrap(); + + assert_eq!(balance_0.confirmed_balance, 1000.into()); + + let _ = paymaster_tracker + .reset_confirmed_balances_for(&[paymaster_0]) + .await; let balance_0 = paymaster_tracker .paymaster_balance(paymaster_0) diff --git a/crates/pool/src/mempool/uo_pool.rs b/crates/pool/src/mempool/uo_pool.rs index 5b1ad0bba..8a31e05e8 100644 --- a/crates/pool/src/mempool/uo_pool.rs +++ b/crates/pool/src/mempool/uo_pool.rs @@ -151,15 +151,24 @@ where .iter() .filter(|op| op.entry_point == self.config.entry_point); - let entity_balance_updates = update - .entity_balance_updates - .iter() - .filter(|u| u.entrypoint == self.config.entry_point); + let entity_balance_updates = update.entity_balance_updates.iter().filter_map(|u| { + if u.entrypoint == self.config.entry_point { + Some(u.address) + } else { + None + } + }); let unmined_entity_balance_updates = update .unmined_entity_balance_updates .iter() - .filter(|u| u.entrypoint == self.config.entry_point); + .filter_map(|u| { + if u.entrypoint == self.config.entry_point { + Some(u.address) + } else { + None + } + }); let unmined_ops = deduped_ops .unmined_ops @@ -168,15 +177,6 @@ where let mut mined_op_count = 0; let mut unmined_op_count = 0; - if update.reorg_larger_than_history { - let _ = self.reset_confirmed_paymaster_balances().await; - } - - self.paymaster.update_paymaster_balances_after_update( - entity_balance_updates, - unmined_entity_balance_updates, - ); - for op in mined_ops { if op.entry_point != self.config.entry_point { continue; @@ -199,6 +199,7 @@ where mined_op_count += 1; } } + for op in unmined_ops { if op.entry_point != self.config.entry_point { continue; @@ -220,21 +221,43 @@ where let _ = self.paymaster.add_or_update_balance(&po).await; } } + + // Update paymaster balances AFTER updating 
the pool to reset confirmed balances if needed. + if update.reorg_larger_than_history { + if let Err(e) = self.reset_confirmed_paymaster_balances().await { + tracing::error!("Failed to reset confirmed paymaster balances: {:?}", e); + } + } else { + let addresses = entity_balance_updates + .chain(unmined_entity_balance_updates) + .unique() + .collect::>(); + if !addresses.is_empty() { + if let Err(e) = self + .paymaster + .reset_confirmed_balances_for(&addresses) + .await + { + tracing::error!("Failed to reset confirmed paymaster balances: {:?}", e); + } + } + } + if mined_op_count > 0 { info!( - "{mined_op_count} op(s) mined on entry point {:?} when advancing to block with number {}, hash {:?}.", - self.config.entry_point, - update.latest_block_number, - update.latest_block_hash, - ); + "{mined_op_count} op(s) mined on entry point {:?} when advancing to block with number {}, hash {:?}.", + self.config.entry_point, + update.latest_block_number, + update.latest_block_hash, + ); } if unmined_op_count > 0 { info!( - "{unmined_op_count} op(s) unmined in reorg on entry point {:?} when advancing to block with number {}, hash {:?}.", - self.config.entry_point, - update.latest_block_number, - update.latest_block_hash, - ); + "{unmined_op_count} op(s) unmined in reorg on entry point {:?} when advancing to block with number {}, hash {:?}.", + self.config.entry_point, + update.latest_block_number, + update.latest_block_hash, + ); } UoPoolMetrics::update_ops_seen( mined_op_count as isize - unmined_op_count as isize, @@ -323,7 +346,7 @@ where } async fn reset_confirmed_paymaster_balances(&self) -> MempoolResult<()> { - self.paymaster.reset_confimed_balances().await + self.paymaster.reset_confirmed_balances().await } async fn get_stake_status(&self, address: Address) -> MempoolResult { @@ -680,6 +703,7 @@ mod tests { use std::collections::HashMap; use ethers::types::{Bytes, H160}; + use mockall::Sequence; use rundler_provider::{DepositInfo, MockEntryPointV0_6}; use rundler_sim::{ MockPrechecker, MockSimulator, PrecheckError, PrecheckSettings, SimulationError, @@ -764,11 +788,25 @@ mod tests { #[tokio::test] async fn chain_update_mine() { let paymaster = Address::random(); - let (pool, uos) = create_pool_insert_ops(vec![ - create_op(Address::random(), 0, 3, None), - create_op(Address::random(), 0, 2, None), - create_op(Address::random(), 0, 1, Some(paymaster)), - ]) + + let mut entrypoint = MockEntryPointV0_6::new(); + // initial balance + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); + // after updates + entrypoint + .expect_get_balances() + .returning(|_| Ok(vec![1110.into()])); + + let (pool, uos) = create_pool_with_entrypoint_insert_ops( + vec![ + create_op(Address::random(), 0, 3, None), + create_op(Address::random(), 0, 2, None), + create_op(Address::random(), 0, 1, Some(paymaster)), + ], + entrypoint, + ) .await; check_ops(pool.best_operations(3, 0).unwrap(), uos.clone()); @@ -819,7 +857,7 @@ mod tests { create_op(Address::random(), 0, 1, Some(paymaster)), ]; - // add pending max cost of 30 for each uo + // add pending max cost of 50 for each uo for op in &mut ops { let uo: &mut UserOperation = op.op.as_mut(); uo.call_gas_limit = 10.into(); @@ -828,7 +866,29 @@ mod tests { uo.max_fee_per_gas = 1.into(); } - let (pool, uos) = create_pool_insert_ops(ops).await; + let mut entrypoint = MockEntryPointV0_6::new(); + // initial balance, pending = 850 + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); + // after updates + let mut seq = 
Sequence::new(); + // one UO mined with actual cost of 10, unmine deposit of 10, mine deposit 100 + // confirmed = 1000 - 10 - 10 + 100 = 1080. Pending = 1080 - 50*2 = 980 + entrypoint + .expect_get_balances() + .once() + .in_sequence(&mut seq) + .returning(|_| Ok(vec![1080.into()])); + // Unmine UO of 10, unmine deposit of 100 + // confirmed = 1080 + 10 - 100 = 990. Pending = 990 - 50*3 = 840 + entrypoint + .expect_get_balances() + .once() + .in_sequence(&mut seq) + .returning(|_| Ok(vec![990.into()])); + + let (pool, uos) = create_pool_with_entrypoint_insert_ops(ops, entrypoint).await; let metadata = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); assert_eq!(metadata.pending_balance, 850.into()); @@ -872,7 +932,7 @@ mod tests { ); let metadata = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); - assert_eq!(metadata.pending_balance, 1000.into()); + assert_eq!(metadata.pending_balance, 980.into()); pool.on_chain_update(&ChainUpdate { latest_block_number: 1, @@ -901,7 +961,7 @@ mod tests { .await; let metadata = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); - assert_eq!(metadata.pending_balance, 850.into()); + assert_eq!(metadata.pending_balance, 840.into()); check_ops(pool.best_operations(3, 0).unwrap(), uos); } @@ -1105,8 +1165,13 @@ mod tests { uo.pre_verification_gas = 1000.into(); uo.max_fee_per_gas = 1.into(); + let mut entrypoint = MockEntryPointV0_6::new(); + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); + let uo = op.op.clone(); - let pool = create_pool(vec![op]); + let pool = create_pool_with_entry_point(vec![op], entrypoint); let ret = pool .add_operation(OperationOrigin::Local, uo.clone()) @@ -1204,7 +1269,17 @@ mod tests { #[tokio::test] async fn test_stake_status_not_staked() { - let mut pool = create_pool(vec![]); + let mut entrypoint = MockEntryPointV0_6::new(); + entrypoint.expect_get_deposit_info().returning(|_| { + Ok(DepositInfo { + deposit: 1000.into(), + staked: true, + stake: 10000, + unstake_delay_sec: 100, + withdraw_time: 10, + }) + }); + let mut pool = create_pool_with_entry_point(vec![], entrypoint); pool.config.sim_settings.min_stake_value = 10001; pool.config.sim_settings.min_unstake_delay = 101; @@ -1225,7 +1300,11 @@ mod tests { uo.pre_verification_gas = 10.into(); uo.max_fee_per_gas = 1.into(); - let pool = create_pool(vec![op.clone()]); + let mut entrypoint = MockEntryPointV0_6::new(); + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); + let pool = create_pool_with_entry_point(vec![op.clone()], entrypoint); let _ = pool .add_operation(OperationOrigin::Local, op.op.clone()) @@ -1399,6 +1478,19 @@ mod tests { impl Prechecker, impl Simulator, impl EntryPoint, + > { + let entrypoint = MockEntryPointV0_6::new(); + create_pool_with_entry_point(ops, entrypoint) + } + + fn create_pool_with_entry_point( + ops: Vec, + entrypoint: MockEntryPointV0_6, + ) -> UoPool< + UserOperation, + impl Prechecker, + impl Simulator, + impl EntryPoint, > { let args = PoolConfig { entry_point: Address::random(), @@ -1423,20 +1515,7 @@ mod tests { let mut simulator = MockSimulator::new(); let mut prechecker = MockPrechecker::new(); - let mut entrypoint = MockEntryPointV0_6::new(); - entrypoint.expect_get_deposit_info().returning(|_| { - Ok(DepositInfo { - deposit: 1000.into(), - staked: true, - stake: 10000, - unstake_delay_sec: 100, - withdraw_time: 10, - }) - }); - entrypoint - .expect_balance_of() - .returning(|_, _| Ok(U256::from(1000))); let paymaster = PaymasterTracker::new( 
entrypoint, PaymasterConfig::new( @@ -1509,6 +1588,26 @@ mod tests { ) } + async fn create_pool_with_entrypoint_insert_ops( + ops: Vec, + entrypoint: MockEntryPointV0_6, + ) -> ( + UoPool< + UserOperation, + impl Prechecker, + impl Simulator, + impl EntryPoint, + >, + Vec, + ) { + let uos = ops.iter().map(|op| op.op.clone()).collect::>(); + let pool = create_pool_with_entry_point(ops, entrypoint); + for op in &uos { + let _ = pool.add_operation(OperationOrigin::Local, op.clone()).await; + } + (pool, uos) + } + async fn create_pool_insert_ops( ops: Vec, ) -> ( diff --git a/crates/rpc/src/eth/error.rs b/crates/rpc/src/eth/error.rs index 6e0858a7c..9bce5e653 100644 --- a/crates/rpc/src/eth/error.rs +++ b/crates/rpc/src/eth/error.rs @@ -42,6 +42,7 @@ const THROTTLED_OR_BANNED_CODE: i32 = -32504; const STAKE_TOO_LOW_CODE: i32 = -32505; const UNSUPORTED_AGGREGATOR_CODE: i32 = -32506; const SIGNATURE_CHECK_FAILED_CODE: i32 = -32507; +const PAYMASTER_DEPOSIT_TOO_LOW: i32 = -32508; const EXECUTION_REVERTED: i32 = -32521; pub(crate) type EthResult = Result; @@ -383,6 +384,7 @@ impl From for ErrorObjectOwned { EthRpcError::PaymasterValidationRejected(data) => { rpc_err_with_data(PAYMASTER_VALIDATION_REJECTED_CODE, msg, data) } + EthRpcError::PaymasterBalanceTooLow(_, _) => rpc_err(PAYMASTER_DEPOSIT_TOO_LOW, msg), EthRpcError::OpcodeViolation(_, _) | EthRpcError::OpcodeViolationMap(_) | EthRpcError::OutOfGas(_) @@ -390,7 +392,6 @@ impl From for ErrorObjectOwned { | EthRpcError::MultipleRolesViolation(_) | EthRpcError::UnstakedPaymasterContext | EthRpcError::SenderAddressUsedAsAlternateEntity(_) - | EthRpcError::PaymasterBalanceTooLow(_, _) | EthRpcError::AssociatedStorageIsAlternateSender | EthRpcError::AssociatedStorageDuringDeploy(_, _, _) | EthRpcError::InvalidStorageAccess(_, _, _) => rpc_err(OPCODE_VIOLATION_CODE, msg), From 9959fb605e5dee508b30b5b5f5f298abe121b6fa Mon Sep 17 00:00:00 2001 From: dancoombs Date: Tue, 18 Jun 2024 16:02:51 -0500 Subject: [PATCH 02/17] feat(pool): add time to mine tracking and metrics --- crates/pool/src/mempool/pool.rs | 128 ++++++++++++++++++++++++++--- crates/pool/src/mempool/uo_pool.rs | 77 +++++++++++------ 2 files changed, 170 insertions(+), 35 deletions(-) diff --git a/crates/pool/src/mempool/pool.rs b/crates/pool/src/mempool/pool.rs index 723615939..817cd9afe 100644 --- a/crates/pool/src/mempool/pool.rs +++ b/crates/pool/src/mempool/pool.rs @@ -12,9 +12,10 @@ // If not, see https://www.gnu.org/licenses/. 
use std::{ - cmp::Ordering, + cmp::{self, Ordering}, collections::{hash_map::Entry, BTreeSet, HashMap, HashSet}, sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, }; use anyhow::Context; @@ -24,7 +25,7 @@ use ethers::{ }; use rundler_types::{ pool::{MempoolError, PoolOperation}, - Entity, EntityType, Timestamp, UserOperation, UserOperationId, UserOperationVariant, + Entity, EntityType, GasFees, Timestamp, UserOperation, UserOperationId, UserOperationVariant, }; use rundler_utils::math; use tracing::info; @@ -81,6 +82,10 @@ pub(crate) struct PoolInner { pool_size: SizeTracker, /// keeps track of the size of the removed cache in bytes cache_size: SizeTracker, + /// The time of the previous block + prev_sys_block_time: Duration, + /// The number of the previous block + prev_block_number: u64, } impl PoolInner { @@ -96,6 +101,8 @@ impl PoolInner { submission_id: 0, pool_size: SizeTracker::default(), cache_size: SizeTracker::default(), + prev_sys_block_time: Duration::default(), + prev_block_number: 0, } } @@ -145,22 +152,54 @@ impl PoolInner { self.best.clone().into_iter().map(|v| v.po) } - /// Removes all operations using the given entity, returning the hashes of the removed operations. + /// Does maintenance on the pool. + /// + /// 1) Removes all operations using the given entity, returning the hashes of the removed operations. + /// 2) Updates time to mine stats for all operations in the pool. /// /// NOTE: This method is O(n) where n is the number of operations in the pool. /// It should be called sparingly (e.g. when a block is mined). - pub(crate) fn remove_expired(&mut self, expire_before: Timestamp) -> Vec<(H256, Timestamp)> { + pub(crate) fn do_maintenance( + &mut self, + block_number: u64, + block_timestamp: Timestamp, + candidate_gas_fees: GasFees, + base_fee: U256, + ) -> Vec<(H256, Timestamp)> { + let sys_block_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time should be after epoch"); + + let block_delta_time = sys_block_time - self.prev_sys_block_time; + let block_delta_height = block_number - self.prev_block_number; let mut expired = Vec::new(); - for (hash, op) in &self.by_hash { - if op.po.valid_time_range.valid_until < expire_before { + let mut num_candidates = 0; + + for (hash, op) in &mut self.by_hash { + if op.po.valid_time_range.valid_until < block_timestamp { expired.push((*hash, op.po.valid_time_range.valid_until)); } + + num_candidates += if op.update_time_to_mine( + block_delta_time, + block_delta_height, + candidate_gas_fees, + base_fee, + ) { + 1 + } else { + 0 + }; } for (hash, _) in &expired { self.remove_operation_by_hash(*hash); } + PoolMetrics::set_num_candidates(num_candidates, self.config.entry_point); + self.prev_block_number = block_number; + self.prev_sys_block_time = sys_block_time; + expired } @@ -243,6 +282,7 @@ impl PoolInner { block_number: u64, ) -> Option> { let tx_in_pool = self.by_id.get(&mined_op.id())?; + PoolMetrics::record_time_to_mine(&tx_in_pool.time_to_mine, mined_op.entry_point); let hash = tx_in_pool .uo() @@ -380,6 +420,7 @@ impl PoolInner { let pool_op = OrderedPoolOperation { po: op, submission_id: submission_id.unwrap_or_else(|| self.next_submission_id()), + time_to_mine: TimeToMineInfo::new(), }; // update counts @@ -484,6 +525,7 @@ impl PoolInner { struct OrderedPoolOperation { po: Arc, submission_id: u64, + time_to_mine: TimeToMineInfo, } impl OrderedPoolOperation { @@ -494,6 +536,28 @@ impl OrderedPoolOperation { fn mem_size(&self) -> usize { std::mem::size_of::() + self.po.mem_size() } + + fn 
update_time_to_mine( + &mut self, + block_delta_time: Duration, + block_delta_height: u64, + candidate_gas_fees: GasFees, + base_fee: U256, + ) -> bool { + let candidate_gas_price = base_fee + candidate_gas_fees.max_priority_fee_per_gas; + let uo_gas_price = cmp::min( + self.uo().max_fee_per_gas(), + self.uo().max_priority_fee_per_gas() + base_fee, + ); + + if uo_gas_price >= candidate_gas_price { + self.time_to_mine + .increase(block_delta_time, block_delta_height); + true + } else { + false + } + } } impl Eq for OrderedPoolOperation {} @@ -521,6 +585,26 @@ impl PartialEq for OrderedPoolOperation { } } +#[derive(Debug, Clone)] +struct TimeToMineInfo { + candidate_for_blocks: u64, + candidate_for_time: Duration, +} + +impl TimeToMineInfo { + fn new() -> Self { + Self { + candidate_for_blocks: 0, + candidate_for_time: Duration::default(), + } + } + + fn increase(&mut self, block_delta_time: Duration, block_delta_height: u64) { + self.candidate_for_blocks += block_delta_height; + self.candidate_for_time += block_delta_time; + } +} + struct PoolMetrics {} impl PoolMetrics { @@ -530,12 +614,32 @@ impl PoolMetrics { metrics::gauge!("op_pool_size_bytes", "entry_point" => entry_point.to_string()) .set(size_bytes as f64); } + fn set_cache_metrics(num_ops: usize, size_bytes: isize, entry_point: Address) { metrics::gauge!("op_pool_num_ops_in_cache", "entry_point" => entry_point.to_string()) .set(num_ops as f64); metrics::gauge!("op_pool_cache_size_bytes", "entry_point" => entry_point.to_string()) .set(size_bytes as f64); } + + // Set the number of candidates in the pool, only changes on block boundaries + fn set_num_candidates(num_candidates: usize, entry_point: Address) { + metrics::gauge!("op_pool_num_candidates", "entry_point" => entry_point.to_string()) + .set(num_candidates as f64); + } + + fn record_time_to_mine(time_to_mine: &TimeToMineInfo, entry_point: Address) { + metrics::histogram!( + "op_pool_time_to_mine", + "entry_point" => entry_point.to_string() + ) + .record(time_to_mine.candidate_for_time.as_millis() as f64); + metrics::histogram!( + "op_pool_blocks_to_mine", + "entry_point" => entry_point.to_string() + ) + .record(time_to_mine.candidate_for_blocks as f64); + } } #[cfg(test)] @@ -907,7 +1011,8 @@ mod tests { pool.pool_size, OrderedPoolOperation { po: Arc::new(po1), - submission_id: 0 + submission_id: 0, + time_to_mine: TimeToMineInfo::new() } .mem_size() ); @@ -947,7 +1052,8 @@ mod tests { pool.pool_size, OrderedPoolOperation { po: Arc::new(po2), - submission_id: 0 + submission_id: 0, + time_to_mine: TimeToMineInfo::new(), } .mem_size() ); @@ -979,7 +1085,7 @@ mod tests { po1.valid_time_range.valid_until = Timestamp::from(1); let _ = pool.add_operation(po1.clone()).unwrap(); - let res = pool.remove_expired(Timestamp::from(2)); + let res = pool.do_maintenance(0, Timestamp::from(2), GasFees::default(), 0.into()); assert_eq!(res.len(), 1); assert_eq!(res[0].0, po1.uo.hash(conf.entry_point, conf.chain_id)); assert_eq!(res[0].1, Timestamp::from(1)); @@ -1001,7 +1107,8 @@ mod tests { po3.valid_time_range.valid_until = 9.into(); let _ = pool.add_operation(po3.clone()).unwrap(); - let res = pool.remove_expired(10.into()); + let res = pool.do_maintenance(0, Timestamp::from(10), GasFees::default(), 0.into()); + assert_eq!(res.len(), 2); assert!(res.contains(&(po1.uo.hash(conf.entry_point, conf.chain_id), 5.into()))); assert!(res.contains(&(po3.uo.hash(conf.entry_point, conf.chain_id), 9.into()))); @@ -1022,6 +1129,7 @@ mod tests { OrderedPoolOperation { po: 
Arc::new(create_op(Address::random(), 1, 1)), submission_id: 1, + time_to_mine: TimeToMineInfo::new(), } .mem_size() } diff --git a/crates/pool/src/mempool/uo_pool.rs b/crates/pool/src/mempool/uo_pool.rs index 8a31e05e8..9ee07f13f 100644 --- a/crates/pool/src/mempool/uo_pool.rs +++ b/crates/pool/src/mempool/uo_pool.rs @@ -25,8 +25,8 @@ use rundler_types::{ pool::{ MempoolError, PaymasterMetadata, PoolOperation, Reputation, ReputationStatus, StakeStatus, }, - Entity, EntityUpdate, EntityUpdateType, EntryPointVersion, UserOperation, UserOperationId, - UserOperationVariant, + Entity, EntityUpdate, EntityUpdateType, EntryPointVersion, GasFees, UserOperation, + UserOperationId, UserOperationVariant, }; use rundler_utils::emit::WithEntryPoint; use tokio::sync::broadcast; @@ -62,6 +62,8 @@ struct UoPoolState { pool: PoolInner, throttled_ops: HashSet, block_number: u64, + gas_fees: GasFees, + base_fee: U256, } impl UoPool @@ -84,6 +86,8 @@ where pool: PoolInner::new(config.clone().into()), throttled_ops: HashSet::new(), block_number: 0, + gas_fees: GasFees::default(), + base_fee: U256::zero(), }), reputation, paymaster, @@ -297,38 +301,61 @@ where }) } - // expire old UOs - let expired = state.pool.remove_expired(update.latest_block_timestamp); + // pool maintenance + let gas_fees = state.gas_fees; + let base_fee = state.base_fee; + let expired = state.pool.do_maintenance( + update.latest_block_number, + update.latest_block_timestamp, + gas_fees, + base_fee, + ); + for (hash, until) in expired { self.emit(OpPoolEvent::RemovedOp { op_hash: hash, reason: OpRemovalReason::Expired { valid_until: until }, }) } - - state.block_number = update.latest_block_number; } // update required bundle fees and update metrics - if let Ok((bundle_fees, base_fee)) = self.prechecker.update_fees().await { - let max_fee = match format_units(bundle_fees.max_fee_per_gas, "gwei") { - Ok(s) => s.parse::().unwrap_or_default(), - Err(_) => 0.0, - }; - UoPoolMetrics::current_max_fee_gwei(max_fee); - - let max_priority_fee = match format_units(bundle_fees.max_priority_fee_per_gas, "gwei") - { - Ok(s) => s.parse::().unwrap_or_default(), - Err(_) => 0.0, - }; - UoPoolMetrics::current_max_priority_fee_gwei(max_priority_fee); - - let base_fee = match format_units(base_fee, "gwei") { - Ok(s) => s.parse::().unwrap_or_default(), - Err(_) => 0.0, - }; - UoPoolMetrics::current_base_fee(base_fee); + match self.prechecker.update_fees().await { + Ok((bundle_fees, base_fee)) => { + let max_fee = match format_units(bundle_fees.max_fee_per_gas, "gwei") { + Ok(s) => s.parse::().unwrap_or_default(), + Err(_) => 0.0, + }; + UoPoolMetrics::current_max_fee_gwei(max_fee); + + let max_priority_fee = + match format_units(bundle_fees.max_priority_fee_per_gas, "gwei") { + Ok(s) => s.parse::().unwrap_or_default(), + Err(_) => 0.0, + }; + UoPoolMetrics::current_max_priority_fee_gwei(max_priority_fee); + + let base_fee_f64 = match format_units(base_fee, "gwei") { + Ok(s) => s.parse::().unwrap_or_default(), + Err(_) => 0.0, + }; + UoPoolMetrics::current_base_fee(base_fee_f64); + + // cache for the next update + { + let mut state = self.state.write(); + state.block_number = update.latest_block_number; + state.gas_fees = bundle_fees; + state.base_fee = base_fee; + } + } + Err(e) => { + tracing::error!("Failed to update fees: {:?}", e); + { + let mut state = self.state.write(); + state.block_number = update.latest_block_number; + } + } } } From f4359397e64c38feee61dcc4721c52c93caea4ad Mon Sep 17 00:00:00 2001 From: dancoombs Date: Fri, 14 Jun 2024 12:23:53 
-0500 Subject: [PATCH 03/17] feat(pool): allow staked senders to have multiple UOs in best_operations --- crates/pool/src/mempool/uo_pool.rs | 31 +++++++++++++++++++------ test/spec-tests/local/launcher.sh | 2 +- test/spec-tests/v0_6/bundler-spec-tests | 2 +- test/spec-tests/v0_7/bundler-spec-tests | 2 +- 4 files changed, 27 insertions(+), 10 deletions(-) diff --git a/crates/pool/src/mempool/uo_pool.rs b/crates/pool/src/mempool/uo_pool.rs index 9ee07f13f..925de0bef 100644 --- a/crates/pool/src/mempool/uo_pool.rs +++ b/crates/pool/src/mempool/uo_pool.rs @@ -641,8 +641,12 @@ where .div_mod(self.config.num_shards.into()) .1 == shard_index.into())) && - // filter out ops from senders we've already seen - senders.insert(op.uo.sender()) + // filter out ops from unstaked senders we've already seen + if !op.account_is_staked { + senders.insert(op.uo.sender()) + } else { + true + } }) .take(max) .map(Into::into) @@ -1031,13 +1035,13 @@ mod tests { async fn test_account_reputation() { let address = Address::random(); let (pool, uos) = create_pool_insert_ops(vec![ - create_op_with_errors(address, 0, 2, None, None, true), - create_op_with_errors(address, 1, 2, None, None, true), - create_op_with_errors(address, 1, 2, None, None, true), + create_op_with_errors(address, 0, 2, None, None, true), // accept + create_op_with_errors(address, 1, 2, None, None, true), // accept + create_op_with_errors(address, 1, 2, None, None, true), // reject ]) .await; - // Only return 1 op per sender - check_ops(pool.best_operations(3, 0).unwrap(), vec![uos[0].clone()]); + // staked, so include all ops + check_ops(pool.best_operations(3, 0).unwrap(), uos[0..2].to_vec()); let rep = pool.dump_reputation(); assert_eq!(rep.len(), 1); @@ -1489,6 +1493,19 @@ mod tests { .is_err()); } + #[tokio::test] + async fn test_best_staked() { + let address = Address::random(); + let (pool, uos) = create_pool_insert_ops(vec![ + create_op_with_errors(address, 0, 2, None, None, true), + create_op_with_errors(address, 1, 2, None, None, true), + create_op_with_errors(address, 2, 2, None, None, true), + ]) + .await; + // staked, so include all ops + check_ops(pool.best_operations(3, 0).unwrap(), uos); + } + #[derive(Clone, Debug)] struct OpWithErrors { op: UserOperationVariant, diff --git a/test/spec-tests/local/launcher.sh b/test/spec-tests/local/launcher.sh index ca440b695..fb967e5e7 100755 --- a/test/spec-tests/local/launcher.sh +++ b/test/spec-tests/local/launcher.sh @@ -7,7 +7,7 @@ case $1 in start) docker-compose up -d sleep 10 - cast send --unlocked --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --value 1ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null + cast send --unlocked --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --value 100ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null (cd ../$2/bundler-spec-tests/@account-abstraction && yarn deploy --network localhost) ../../../target/debug/rundler node --log.file out.log & while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:3000/health)" != "200" ]]; do sleep 1 ; done diff --git a/test/spec-tests/v0_6/bundler-spec-tests b/test/spec-tests/v0_6/bundler-spec-tests index 6bed71394..e6e2b1adf 160000 --- a/test/spec-tests/v0_6/bundler-spec-tests +++ b/test/spec-tests/v0_6/bundler-spec-tests @@ -1 +1 @@ -Subproject commit 6bed71394894ed514fc3c35585a3217a40c2e2b3 +Subproject commit e6e2b1adf5b592a45b96829bcb41a4966c566a55 diff --git a/test/spec-tests/v0_7/bundler-spec-tests b/test/spec-tests/v0_7/bundler-spec-tests index 56888c610..032e69d59 
160000 --- a/test/spec-tests/v0_7/bundler-spec-tests +++ b/test/spec-tests/v0_7/bundler-spec-tests @@ -1 +1 @@ -Subproject commit 56888c610d00cc8ea1cdefbae0209d158603a8bb +Subproject commit 032e69d598c3cdf09b27b58eb6ee294e4d1580be From 87beea3381af51d92634c3867e6c15a4d5874321 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Wed, 5 Jun 2024 15:41:38 -0500 Subject: [PATCH 04/17] feat(builder): remove polling from transaction tracker, update state machine --- crates/builder/src/bundle_proposer.rs | 6 + crates/builder/src/bundle_sender.rs | 717 +++++++++++++--------- crates/builder/src/sender/mod.rs | 5 + crates/builder/src/task.rs | 4 +- crates/builder/src/transaction_tracker.rs | 390 ++++-------- 5 files changed, 562 insertions(+), 560 deletions(-) diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 01b554fde..49375116b 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -150,6 +150,9 @@ where .map_err(anyhow::Error::from), self.fee_estimator.required_bundle_fees(required_fees) )?; + if ops.is_empty() { + return Ok(Bundle::default()); + } tracing::debug!("Starting bundle proposal with {} ops", ops.len()); @@ -177,6 +180,9 @@ where .collect::>(); tracing::debug!("Bundle proposal after fee limit had {} ops", ops.len()); + if ops.is_empty() { + return Ok(Bundle::default()); + } // (2) Limit the amount of operations for simulation let (ops, gas_limit) = self.limit_user_operations_for_simulation(ops); diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index 892c17419..cae080929 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -20,19 +20,22 @@ use futures_util::StreamExt; use rundler_provider::{BundleHandler, EntryPoint}; use rundler_sim::ExpectedStorage; use rundler_types::{ - builder::BundlingMode, chain::ChainSpec, pool::Pool, EntityUpdate, GasFees, UserOperation, + builder::BundlingMode, + chain::ChainSpec, + pool::{NewHead, Pool}, + EntityUpdate, GasFees, UserOperation, }; use rundler_utils::emit::WithEntryPoint; use tokio::{ join, - sync::{broadcast, mpsc, oneshot}, + sync::{broadcast, mpsc, mpsc::UnboundedReceiver, oneshot}, }; -use tracing::{debug, error, info, instrument, trace, warn}; +use tracing::{debug, error, info, instrument, warn}; use crate::{ bundle_proposer::BundleProposer, emit::{BuilderEvent, BundleTxDetails}, - transaction_tracker::{SendResult, TrackerUpdate, TransactionTracker}, + transaction_tracker::{TrackerUpdate, TransactionTracker, TransactionTrackerError}, }; #[async_trait] @@ -42,14 +45,14 @@ pub(crate) trait BundleSender: Send + Sync + 'static { #[derive(Debug)] pub(crate) struct Settings { - pub(crate) replacement_fee_percent_increase: u64, pub(crate) max_fee_increases: u64, + pub(crate) max_blocks_to_wait_for_mine: u64, } #[derive(Debug)] pub(crate) struct BundleSenderImpl { builder_index: u64, - bundle_action_receiver: mpsc::Receiver, + bundle_action_receiver: Option>, chain_spec: ChainSpec, beneficiary: Address, proposer: P, @@ -77,6 +80,9 @@ pub struct SendBundleRequest { pub responder: oneshot::Sender, } +/// Response to a `SendBundleRequest` after +/// going through a full cycle of bundling, sending, +/// and waiting for the transaction to be mined. 
#[derive(Debug)] pub enum SendBundleResult { Success { @@ -86,13 +92,24 @@ pub enum SendBundleResult { }, NoOperationsInitially, NoOperationsAfterFeeIncreases { - initial_op_count: usize, attempt_number: u64, }, StalledAtMaxFeeIncreases, Error(anyhow::Error), } +// Internal result of attempting to send a bundle. +enum SendBundleAttemptResult { + // The bundle was successfully sent + Success, + // The bundle was empty + NoOperations, + // Replacement Underpriced + ReplacementUnderpriced, + // Nonce too low + NonceTooLow, +} + #[async_trait] impl BundleSender for BundleSenderImpl where @@ -107,139 +124,183 @@ where /// next one. #[instrument(skip_all, fields(entry_point = self.entry_point.address().to_string(), builder_index = self.builder_index))] async fn send_bundles_in_loop(mut self) -> anyhow::Result<()> { - let Ok(mut new_heads) = self.pool.subscribe_new_heads().await else { - error!("Failed to subscribe to new blocks"); - bail!("failed to subscribe to new blocks"); - }; + // State of the sender loop + enum State { + // Building a bundle, optionally waiting for a trigger to send it + // (wait_for_trigger, fee_increase_count) + Building(bool, u64), + // Waiting for a bundle to be mined + // (wait_until_block, fee_increase_count) + Pending(u64, u64), + } - // The new_heads stream can buffer up multiple blocks, but we only want to consume the latest one. - // This task is used to consume the new heads and place them onto a channel that can be synchronously - // consumed until the latest block is reached. - let (tx, mut rx) = mpsc::unbounded_channel(); - tokio::spawn(async move { - loop { - match new_heads.next().await { - Some(b) => { - if tx.send(b).is_err() { - error!("Failed to buffer new block for bundle sender"); - return; - } - } - None => { - error!("Block stream ended"); - return; - } - } - } - }); + // initial state + let mut state = State::Building(true, 0); + + // response to manual caller + let mut send_bundle_response = None; + let mut send_bundle_result = None; + + // trigger for sending bundles + let mut sender_trigger = BundleSenderTrigger::new( + &self.pool, + self.bundle_action_receiver.take().unwrap(), + Duration::from_millis(self.chain_spec.bundle_max_send_interval_millis), + ) + .await?; - let mut bundling_mode = BundlingMode::Auto; - let mut timer = tokio::time::interval(Duration::from_millis( - self.chain_spec.bundle_max_send_interval_millis, - )); loop { - let mut send_bundle_response: Option> = None; - let mut last_block = None; + match state { + State::Building(wait_for_trigger, fee_increase_count) => { + if wait_for_trigger { + send_bundle_response = sender_trigger.wait_for_trigger().await?; - // 3 triggers for loop logic: - // 1 - new block - // - If auto mode, send next bundle - // 2 - timer tick - // - If auto mode, send next bundle - // 3 - action recv - // - If change mode, change and restart loop - // - If send bundle and manual mode, send next bundle - last_block = tokio::select! 
{ - b = rx.recv() => { - match bundling_mode { - BundlingMode::Manual => continue, - BundlingMode::Auto => b + // process any nonce updates, ignore result + self.check_for_transaction_update().await; } - }, - _ = timer.tick() => { - match bundling_mode { - BundlingMode::Manual => continue, - BundlingMode::Auto => Some(last_block.unwrap_or_default()) - } - }, - a = self.bundle_action_receiver.recv() => { - match a { - Some(BundleSenderAction::ChangeMode(mode)) => { - debug!("chainging bundling mode to {mode:?}"); - bundling_mode = mode; - continue; - }, - Some(BundleSenderAction::SendBundle(r)) => { - match bundling_mode { - BundlingMode::Manual => { - send_bundle_response = Some(r.responder); - Some(last_block.unwrap_or_default()) - }, - BundlingMode::Auto => { - error!("Received bundle send action while in auto mode, ignoring"); - continue; - } + + // send bundle + debug!( + "Building bundle on block {}", + sender_trigger.last_block.block_number + ); + let result = self.send_bundle(fee_increase_count).await; + + // handle result + match result { + Ok(SendBundleAttemptResult::Success) => { + // sent the bundle + info!("Bundle sent successfully"); + state = State::Pending( + sender_trigger.last_block.block_number + + self.settings.max_blocks_to_wait_for_mine, + 0, + ); + } + Ok(SendBundleAttemptResult::NoOperations) => { + debug!("No operations in bundle"); + + if fee_increase_count > 0 { + // TODO(danc): when abandoning we tend to get "stuck" where we have a pending bundle + // transaction that isn't landing. We should move to a new state where we track the + // pending transaction and "cancel" it if it doesn't land after a certain number of blocks. + + warn!("Abandoning bundle after fee increases {fee_increase_count}, no operations available, waiting for next trigger"); + BuilderMetrics::increment_bundle_txns_abandoned( + self.builder_index, + self.entry_point.address(), + ); + self.transaction_tracker.reset().await; + send_bundle_result = + Some(SendBundleResult::NoOperationsAfterFeeIncreases { + attempt_number: fee_increase_count, + }) + } else { + debug!("No operations available, waiting for next trigger"); + send_bundle_result = Some(SendBundleResult::NoOperationsInitially); } - }, - None => { - error!("Bundle action recv closed"); - bail!("Bundle action recv closed"); + + state = State::Building(true, 0); } - } - } - }; + Ok(SendBundleAttemptResult::NonceTooLow) => { + // reset the transaction tracker and try again + self.transaction_tracker.reset().await; + state = State::Building(true, 0); + } + Ok(SendBundleAttemptResult::ReplacementUnderpriced) => { + // TODO(danc): handle replacement underpriced + // move to the cancellation state - // Consume any other blocks that may have been buffered up - loop { - match rx.try_recv() { - Ok(b) => { - last_block = Some(b); - } - Err(mpsc::error::TryRecvError::Empty) => { - break; - } - Err(mpsc::error::TryRecvError::Disconnected) => { - error!("Block stream closed"); - bail!("Block stream closed"); + // for now: reset the transaction tracker and try again + self.transaction_tracker.reset().await; + state = State::Building(true, 0); + } + Err(error) => { + error!("Bundle send error {error:?}"); + BuilderMetrics::increment_bundle_txns_failed( + self.builder_index, + self.entry_point.address(), + ); + self.transaction_tracker.reset().await; + send_bundle_result = Some(SendBundleResult::Error(error)); + state = State::Building(true, 0); + } } } - } + State::Pending(until, fee_increase_count) => { + sender_trigger.wait_for_block().await?; + + // 
check for transaction update + if let Some(update) = self.check_for_transaction_update().await { + match update { + TrackerUpdate::Mined { + block_number, + attempt_number, + tx_hash, + .. + } => { + // mined! + info!("Bundle transaction mined"); + send_bundle_result = Some(SendBundleResult::Success { + block_number, + attempt_number, + tx_hash, + }); + state = State::Building(true, 0); + } + TrackerUpdate::LatestTxDropped { .. } => { + // try again, don't wait for trigger, re-estimate fees + info!("Latest transaction dropped, starting new bundle attempt"); - // Wait for new block. Block number doesn't matter as the pool will only notify of new blocks - // after the pool has updated its state. The bundle will be formed using the latest pool state - // and can land in the next block - self.check_for_and_log_transaction_update().await; - let result = self.send_bundle_with_increasing_gas_fees().await; - match &result { - SendBundleResult::Success { - block_number, - attempt_number, - tx_hash, - } => - if *attempt_number == 0 { - info!("Bundle with hash {tx_hash:?} landed in block {block_number}"); - } else { - info!("Bundle with hash {tx_hash:?} landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); + // force reset the transaction tracker + self.transaction_tracker.reset().await; + + state = State::Building(true, 0); + } + TrackerUpdate::NonceUsedForOtherTx { .. } => { + // try again, don't wait for trigger, re-estimate fees + info!("Nonce used externally, starting new bundle attempt"); + state = State::Building(true, 0); + } + } + } else if sender_trigger.last_block().block_number >= until { + if fee_increase_count >= self.settings.max_fee_increases { + // TODO(danc): same "stuck" issue here on abandonment + // this abandon is likely to lead to "transaction underpriced" errors + // Instead, move to cancellation state. + + warn!("Abandoning bundle after max fee increases {fee_increase_count}"); + BuilderMetrics::increment_bundle_txns_abandoned( + self.builder_index, + self.entry_point.address(), + ); + self.transaction_tracker.reset().await; + state = State::Building(true, 0); + } else { + // start replacement, don't wait for trigger + info!( + "Not mined after {} blocks, increasing fees, attempt: {}", + self.settings.max_blocks_to_wait_for_mine, + fee_increase_count + 1 + ); + BuilderMetrics::increment_bundle_txn_fee_increases( + self.builder_index, + self.entry_point.address(), + ); + state = State::Building(false, fee_increase_count + 1); + } } - SendBundleResult::NoOperationsInitially => trace!("No ops to send at block {}", last_block.unwrap_or_default().block_number), - SendBundleResult::NoOperationsAfterFeeIncreases { - initial_op_count, - attempt_number, - } => info!("Bundle initially had {initial_op_count} operations, but after increasing gas fees {attempt_number} time(s) it was empty"), - SendBundleResult::StalledAtMaxFeeIncreases => warn!("Bundle failed to mine after {} fee increases", self.settings.max_fee_increases), - SendBundleResult::Error(error) => { - BuilderMetrics::increment_bundle_txns_failed(self.builder_index, self.entry_point.address()); - error!("Failed to send bundle. 
Will retry next block: {error:#?}"); } } - if let Some(t) = send_bundle_response.take() { - if t.send(result).is_err() { - error!("Failed to send bundle result to manual caller"); + // send result to manual caller + if let Some(res) = send_bundle_result.take() { + if let Some(r) = send_bundle_response.take() { + if r.send(res).is_err() { + error!("Failed to send bundle result to manual caller"); + } } } - - timer.reset(); } } } @@ -267,7 +328,7 @@ where ) -> Self { Self { builder_index, - bundle_action_receiver, + bundle_action_receiver: Some(bundle_action_receiver), chain_spec, beneficiary, proposer, @@ -280,18 +341,17 @@ where } } - async fn check_for_and_log_transaction_update(&self) { - let update = self.transaction_tracker.check_for_update_now().await; + async fn check_for_transaction_update(&mut self) -> Option { + let update = self.transaction_tracker.check_for_update().await; let update = match update { - Ok(update) => update, + Ok(update) => update?, Err(error) => { error!("Failed to check for transaction updates: {error:#?}"); - return; + return None; } }; - let Some(update) = update else { - return; - }; + + // process update before returning match update { TrackerUpdate::Mined { tx_hash, @@ -299,6 +359,7 @@ where attempt_number, gas_limit, gas_used, + nonce, .. } => { BuilderMetrics::increment_bundle_txns_success( @@ -316,8 +377,13 @@ where } else { info!("Bundle with hash {tx_hash:?} landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); } + self.emit(BuilderEvent::transaction_mined( + self.builder_index, + tx_hash, + nonce.low_u64(), + block_number, + )); } - TrackerUpdate::StillPendingAfterWait => (), TrackerUpdate::LatestTxDropped { nonce } => { self.emit(BuilderEvent::latest_transaction_dropped( self.builder_index, @@ -338,189 +404,84 @@ where self.builder_index, self.entry_point.address(), ); - info!("Nonce used by external transaction") - } - TrackerUpdate::ReplacementUnderpriced => { - BuilderMetrics::increment_bundle_txn_replacement_underpriced( - self.builder_index, - self.entry_point.address(), - ); - info!("Replacement transaction underpriced") + info!("Nonce used by external transaction"); } }; + + Some(update) } - /// Constructs a bundle and sends it to the entry point as a transaction. If - /// the bundle fails to be mined after - /// `settings.max_blocks_to_wait_for_mine` blocks, increases the gas fees by - /// enough to send a replacement transaction, then constructs a new bundle - /// using the new, higher gas requirements. Continues to retry with higher - /// gas costs until one of the following happens: + /// Constructs a bundle and sends it to the entry point as a transaction. /// - /// 1. A transaction succeeds (not necessarily the most recent one) - /// 2. The gas fees are high enough that the bundle is empty because there + /// Returns empty if: + /// - There are no ops available to bundle initially. + /// - The gas fees are high enough that the bundle is empty because there /// are no ops that meet the fee requirements. - /// 3. The transaction has not succeeded after `settings.max_fee_increases` - /// replacements. 
- async fn send_bundle_with_increasing_gas_fees(&self) -> SendBundleResult { - let result = self.send_bundle_with_increasing_gas_fees_inner().await; - match result { - Ok(result) => result, - Err(error) => SendBundleResult::Error(error), - } - } + async fn send_bundle( + &mut self, + fee_increase_count: u64, + ) -> anyhow::Result { + let (nonce, required_fees) = self.transaction_tracker.get_nonce_and_required_fees()?; + + let Some(bundle_tx) = self + .get_bundle_tx(nonce, required_fees, fee_increase_count > 0) + .await? + else { + self.emit(BuilderEvent::formed_bundle( + self.builder_index, + None, + nonce.low_u64(), + fee_increase_count, + required_fees, + )); + return Ok(SendBundleAttemptResult::NoOperations); + }; + let BundleTx { + tx, + expected_storage, + op_hashes, + } = bundle_tx; + + BuilderMetrics::increment_bundle_txns_sent(self.builder_index, self.entry_point.address()); - /// Helper function returning `Result` to be able to use `?`. - async fn send_bundle_with_increasing_gas_fees_inner(&self) -> anyhow::Result { - let (nonce, mut required_fees) = self.transaction_tracker.get_nonce_and_required_fees()?; - let mut initial_op_count: Option = None; - let mut is_replacement = false; - - for fee_increase_count in 0..=self.settings.max_fee_increases { - let Some(bundle_tx) = self - .get_bundle_tx(nonce, required_fees, is_replacement) - .await? - else { + let send_result = self + .transaction_tracker + .send_transaction(tx.clone(), &expected_storage) + .await; + + match send_result { + Ok(tx_hash) => { self.emit(BuilderEvent::formed_bundle( self.builder_index, - None, + Some(BundleTxDetails { + tx_hash, + tx, + op_hashes: Arc::new(op_hashes), + }), nonce.low_u64(), fee_increase_count, required_fees, )); - return Ok(match initial_op_count { - Some(initial_op_count) => { - BuilderMetrics::increment_bundle_txns_abandoned( - self.builder_index, - self.entry_point.address(), - ); - SendBundleResult::NoOperationsAfterFeeIncreases { - initial_op_count, - attempt_number: fee_increase_count, - } - } - None => SendBundleResult::NoOperationsInitially, - }); - }; - let BundleTx { - tx, - expected_storage, - op_hashes, - } = bundle_tx; - if initial_op_count.is_none() { - initial_op_count = Some(op_hashes.len()); - } - let current_fees = GasFees::from(&tx); - - BuilderMetrics::increment_bundle_txns_sent( - self.builder_index, - self.entry_point.address(), - ); - let send_result = self - .transaction_tracker - .send_transaction(tx.clone(), &expected_storage) - .await?; - let update = match send_result { - SendResult::TrackerUpdate(update) => update, - SendResult::TxHash(tx_hash) => { - self.emit(BuilderEvent::formed_bundle( - self.builder_index, - Some(BundleTxDetails { - tx_hash, - tx, - op_hashes: Arc::new(op_hashes), - }), - nonce.low_u64(), - fee_increase_count, - required_fees, - )); - self.transaction_tracker.wait_for_update().await? 
- } - }; - match update { - TrackerUpdate::Mined { - tx_hash, - nonce, - block_number, - attempt_number, - gas_limit, - gas_used, - } => { - self.emit(BuilderEvent::transaction_mined( - self.builder_index, - tx_hash, - nonce.low_u64(), - block_number, - )); - BuilderMetrics::increment_bundle_txns_success( - self.builder_index, - self.entry_point.address(), - ); - BuilderMetrics::set_bundle_gas_stats( - gas_limit, - gas_used, - self.builder_index, - self.entry_point.address(), - ); - return Ok(SendBundleResult::Success { - block_number, - attempt_number, - tx_hash, - }); - } - TrackerUpdate::StillPendingAfterWait => { - info!("Transaction not mined for several blocks") - } - TrackerUpdate::LatestTxDropped { nonce } => { - self.emit(BuilderEvent::latest_transaction_dropped( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_dropped( - self.builder_index, - self.entry_point.address(), - ); - info!("Previous transaction dropped by sender"); - } - TrackerUpdate::NonceUsedForOtherTx { nonce } => { - self.emit(BuilderEvent::nonce_used_for_other_transaction( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_nonce_used( - self.builder_index, - self.entry_point.address(), - ); - bail!("nonce used by external transaction") - } - TrackerUpdate::ReplacementUnderpriced => { - BuilderMetrics::increment_bundle_txn_replacement_underpriced( - self.builder_index, - self.entry_point.address(), - ); - info!("Replacement transaction underpriced, increasing fees") - } - }; - info!( - "Bundle transaction failed to mine after {fee_increase_count} fee increases (maxFeePerGas: {}, maxPriorityFeePerGas: {}).", - current_fees.max_fee_per_gas, - current_fees.max_priority_fee_per_gas, - ); - BuilderMetrics::increment_bundle_txn_fee_increases( - self.builder_index, - self.entry_point.address(), - ); - required_fees = Some( - current_fees.increase_by_percent(self.settings.replacement_fee_percent_increase), - ); - is_replacement = true; + Ok(SendBundleAttemptResult::Success) + } + Err(TransactionTrackerError::NonceTooLow) => { + warn!("Replacement transaction underpriced"); + Ok(SendBundleAttemptResult::NonceTooLow) + } + Err(TransactionTrackerError::ReplacementUnderpriced) => { + BuilderMetrics::increment_bundle_txn_replacement_underpriced( + self.builder_index, + self.entry_point.address(), + ); + warn!("Replacement transaction underpriced"); + Ok(SendBundleAttemptResult::ReplacementUnderpriced) + } + Err(e) => { + error!("Failed to send bundle with unexpected error: {e:?}"); + Err(e.into()) + } } - BuilderMetrics::increment_bundle_txns_abandoned( - self.builder_index, - self.entry_point.address(), - ); - Ok(SendBundleResult::StalledAtMaxFeeIncreases) } /// Builds a bundle and returns some metadata and the transaction to send @@ -611,6 +572,162 @@ where } } +struct BundleSenderTrigger { + bundling_mode: BundlingMode, + block_rx: UnboundedReceiver, + bundle_action_receiver: mpsc::Receiver, + timer: tokio::time::Interval, + last_block: NewHead, +} + +impl BundleSenderTrigger { + async fn new( + pool_client: &P, + bundle_action_receiver: mpsc::Receiver, + timer_interval: Duration, + ) -> anyhow::Result { + let block_rx = Self::start_block_stream(pool_client).await?; + + Ok(Self { + bundling_mode: BundlingMode::Auto, + block_rx, + bundle_action_receiver, + timer: tokio::time::interval(timer_interval), + last_block: NewHead { + block_hash: H256::zero(), + block_number: 0, + }, + }) + } + + async fn start_block_stream( + pool_client: &P, + ) -> 
anyhow::Result> { + let Ok(mut new_heads) = pool_client.subscribe_new_heads().await else { + error!("Failed to subscribe to new blocks"); + bail!("failed to subscribe to new blocks"); + }; + + let (tx, rx) = mpsc::unbounded_channel(); + tokio::spawn(async move { + loop { + match new_heads.next().await { + Some(b) => { + if tx.send(b).is_err() { + error!("Failed to buffer new block for bundle sender"); + return; + } + } + None => { + error!("Block stream ended"); + return; + } + } + } + }); + + Ok(rx) + } + + async fn wait_for_trigger( + &mut self, + ) -> anyhow::Result>> { + let mut send_bundle_response: Option> = None; + + loop { + // 3 triggers for loop logic: + // 1 - new block + // - If auto mode, send next bundle + // 2 - timer tick + // - If auto mode, send next bundle + // 3 - action recv + // - If change mode, change and restart loop + // - If send bundle and manual mode, send next bundle + tokio::select! { + b = self.block_rx.recv() => { + let Some(b) = b else { + error!("Block stream closed"); + bail!("Block stream closed"); + }; + + self.last_block = b; + + match self.bundling_mode { + BundlingMode::Manual => continue, + BundlingMode::Auto => break, + } + }, + _ = self.timer.tick() => { + match self.bundling_mode { + BundlingMode::Manual => continue, + BundlingMode::Auto => break, + } + }, + a = self.bundle_action_receiver.recv() => { + match a { + Some(BundleSenderAction::ChangeMode(mode)) => { + debug!("changing bundling mode to {mode:?}"); + self.bundling_mode = mode; + continue; + }, + Some(BundleSenderAction::SendBundle(r)) => { + match self.bundling_mode { + BundlingMode::Manual => { + send_bundle_response = Some(r.responder); + break; + }, + BundlingMode::Auto => { + error!("Received bundle send action while in auto mode, ignoring"); + continue; + } + } + }, + None => { + error!("Bundle action recv closed"); + bail!("Bundle action recv closed"); + } + } + } + }; + } + + self.consume_blocks()?; + + Ok(send_bundle_response) + } + + async fn wait_for_block(&mut self) -> anyhow::Result { + self.block_rx + .recv() + .await + .ok_or_else(|| anyhow::anyhow!("Block stream closed"))?; + self.consume_blocks()?; + Ok(self.last_block.clone()) + } + + fn consume_blocks(&mut self) -> anyhow::Result<()> { + // Consume any other blocks that may have been buffered up + loop { + match self.block_rx.try_recv() { + Ok(b) => { + self.last_block = b; + } + Err(mpsc::error::TryRecvError::Empty) => { + return Ok(()); + } + Err(mpsc::error::TryRecvError::Disconnected) => { + error!("Block stream closed"); + bail!("Block stream closed"); + } + } + } + } + + fn last_block(&self) -> &NewHead { + &self.last_block + } +} + struct BuilderMetrics {} impl BuilderMetrics { diff --git a/crates/builder/src/sender/mod.rs b/crates/builder/src/sender/mod.rs index e958e574b..a83d0a296 100644 --- a/crates/builder/src/sender/mod.rs +++ b/crates/builder/src/sender/mod.rs @@ -54,6 +54,9 @@ pub(crate) enum TxSenderError { /// Replacement transaction was underpriced #[error("replacement transaction underpriced")] ReplacementUnderpriced, + /// Nonce too low + #[error("nonce too low")] + NonceTooLow, /// All other errors #[error(transparent)] Other(#[from] anyhow::Error), @@ -217,6 +220,8 @@ impl From for TxSenderError { if let Some(e) = e.as_error_response() { if e.message.contains("replacement transaction underpriced") { return TxSenderError::ReplacementUnderpriced; + } else if e.message.contains("nonce too low") { + return TxSenderError::NonceTooLow; } } TxSenderError::Other(value.into()) diff --git 
a/crates/builder/src/task.rs b/crates/builder/src/task.rs index 27f7dbf01..ee8565955 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -415,8 +415,6 @@ where )?; let tracker_settings = transaction_tracker::Settings { - poll_interval: self.args.eth_poll_interval, - max_blocks_to_wait_for_mine: self.args.max_blocks_to_wait_for_mine, replacement_fee_percent_increase: self.args.replacement_fee_percent_increase, }; @@ -429,8 +427,8 @@ where .await?; let builder_settings = bundle_sender::Settings { - replacement_fee_percent_increase: self.args.replacement_fee_percent_increase, max_fee_increases: self.args.max_fee_increases, + max_blocks_to_wait_for_mine: self.args.max_blocks_to_wait_for_mine, }; let proposer = BundleProposerImpl::new( diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index 1dbe8f99d..b5a452615 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -11,7 +11,7 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use anyhow::{bail, Context}; use async_trait::async_trait; @@ -19,8 +19,7 @@ use ethers::types::{transaction::eip2718::TypedTransaction, H256, U256}; use rundler_provider::Provider; use rundler_sim::ExpectedStorage; use rundler_types::GasFees; -use tokio::time; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use crate::sender::{TransactionSender, TxSenderError, TxStatus}; @@ -35,38 +34,46 @@ use crate::sender::{TransactionSender, TxSenderError, TxStatus}; /// have changed so that it is worth making another attempt. #[async_trait] pub(crate) trait TransactionTracker: Send + Sync + 'static { - fn get_nonce_and_required_fees(&self) -> anyhow::Result<(U256, Option)>; + /// Returns the current nonce and the required fees for the next transaction. + fn get_nonce_and_required_fees(&self) -> TransactionTrackerResult<(U256, Option)>; /// Sends the provided transaction and typically returns its transaction /// hash, but if the transaction failed to send because another transaction /// with the same nonce mined first, then returns information about that /// transaction instead. async fn send_transaction( - &self, + &mut self, tx: TypedTransaction, expected_stroage: &ExpectedStorage, - ) -> anyhow::Result; + ) -> TransactionTrackerResult; - /// Waits until one of the following occurs: - /// + /// Checks: /// 1. One of our transactions mines (not necessarily the one just sent). /// 2. All our send transactions have dropped. /// 3. Our nonce has changed but none of our transactions mined. This means /// that a transaction from our account other than one of the ones we are /// tracking has mined. This should not normally happen. /// 4. Several new blocks have passed. - async fn wait_for_update(&self) -> anyhow::Result; + async fn check_for_update(&mut self) -> TransactionTrackerResult>; - /// Like `wait_for_update`, except it returns immediately if there is no - /// update rather than waiting for several new blocks. - async fn check_for_update_now(&self) -> anyhow::Result>; + /// Resets the tracker to its initial state + async fn reset(&mut self); } -pub(crate) enum SendResult { - TxHash(H256), - TrackerUpdate(TrackerUpdate), +/// Errors that can occur while using a `TransactionTracker`. 
+#[derive(Debug, thiserror::Error)] +pub(crate) enum TransactionTrackerError { + #[error("nonce too low")] + NonceTooLow, + #[error("replacement transaction underpriced")] + ReplacementUnderpriced, + /// All other errors + #[error(transparent)] + Other(#[from] anyhow::Error), } +pub(crate) type TransactionTrackerResult = std::result::Result; + #[derive(Debug)] #[allow(dead_code)] pub(crate) enum TrackerUpdate { @@ -78,26 +85,16 @@ pub(crate) enum TrackerUpdate { gas_limit: Option, gas_used: Option, }, - StillPendingAfterWait, LatestTxDropped { nonce: U256, }, NonceUsedForOtherTx { nonce: U256, }, - ReplacementUnderpriced, } #[derive(Debug)] -pub(crate) struct TransactionTrackerImpl( - tokio::sync::Mutex>, -) -where - P: Provider, - T: TransactionSender; - -#[derive(Debug)] -struct TransactionTrackerImplInner +pub(crate) struct TransactionTrackerImpl where P: Provider, T: TransactionSender, @@ -114,8 +111,6 @@ where #[derive(Clone, Copy, Debug)] pub(crate) struct Settings { - pub(crate) poll_interval: Duration, - pub(crate) max_blocks_to_wait_for_mine: u64, pub(crate) replacement_fee_percent_increase: u64, } @@ -126,33 +121,6 @@ struct PendingTransaction { attempt_number: u64, } -#[async_trait] -impl TransactionTracker for TransactionTrackerImpl -where - P: Provider, - T: TransactionSender, -{ - fn get_nonce_and_required_fees(&self) -> anyhow::Result<(U256, Option)> { - Ok(self.inner()?.get_nonce_and_required_fees()) - } - - async fn send_transaction( - &self, - tx: TypedTransaction, - expected_storage: &ExpectedStorage, - ) -> anyhow::Result { - self.inner()?.send_transaction(tx, expected_storage).await - } - - async fn wait_for_update(&self) -> anyhow::Result { - self.inner()?.wait_for_update().await - } - - async fn check_for_update_now(&self) -> anyhow::Result> { - self.inner()?.check_for_update_now().await - } -} - impl TransactionTrackerImpl where P: Provider, @@ -163,31 +131,6 @@ where sender: T, settings: Settings, builder_index: u64, - ) -> anyhow::Result { - let inner = - TransactionTrackerImplInner::new(provider, sender, settings, builder_index).await?; - Ok(Self(tokio::sync::Mutex::new(inner))) - } - - fn inner( - &self, - ) -> anyhow::Result>> { - self.0 - .try_lock() - .context("tracker should not be called while waiting for a transaction") - } -} - -impl TransactionTrackerImplInner -where - P: Provider, - T: TransactionSender, -{ - async fn new( - provider: Arc
, - sender: T, - settings: Settings, - builder_index: u64, ) -> anyhow::Result { let nonce = provider .get_transaction_count(sender.address()) @@ -205,7 +148,84 @@ where }) } - fn get_nonce_and_required_fees(&self) -> (U256, Option) { + fn set_nonce_and_clear_state(&mut self, nonce: U256) { + self.nonce = nonce; + self.transactions.clear(); + self.has_dropped = false; + self.attempt_count = 0; + self.update_metrics(); + } + + async fn get_external_nonce(&self) -> anyhow::Result { + self.provider + .get_transaction_count(self.sender.address()) + .await + .context("tracker should load current nonce from provider") + } + + fn validate_transaction(&self, tx: &TypedTransaction) -> anyhow::Result<()> { + let Some(&nonce) = tx.nonce() else { + bail!("transaction given to tracker should have nonce set"); + }; + let gas_fees = GasFees::from(tx); + let (required_nonce, required_gas_fees) = self.get_nonce_and_required_fees()?; + if nonce != required_nonce { + bail!("tried to send transaction with nonce {nonce}, but should match tracker's nonce of {required_nonce}"); + } + if let Some(required_gas_fees) = required_gas_fees { + if gas_fees.max_fee_per_gas < required_gas_fees.max_fee_per_gas + || gas_fees.max_priority_fee_per_gas < required_gas_fees.max_priority_fee_per_gas + { + bail!("new transaction's gas fees should be at least the required fees") + } + } + Ok(()) + } + + fn update_metrics(&self) { + TransactionTrackerMetrics::set_num_pending_transactions( + self.builder_index, + self.transactions.len(), + ); + TransactionTrackerMetrics::set_nonce(self.builder_index, self.nonce); + TransactionTrackerMetrics::set_attempt_count(self.builder_index, self.attempt_count); + if let Some(tx) = self.transactions.last() { + TransactionTrackerMetrics::set_current_fees(self.builder_index, Some(tx.gas_fees)); + } else { + TransactionTrackerMetrics::set_current_fees(self.builder_index, None); + } + } + + async fn get_mined_tx_gas_info( + &self, + tx_hash: H256, + ) -> anyhow::Result<(Option, Option)> { + let (tx, tx_receipt) = tokio::try_join!( + self.provider.get_transaction(tx_hash), + self.provider.get_transaction_receipt(tx_hash), + )?; + let gas_limit = tx.map(|t| t.gas).or_else(|| { + warn!("failed to fetch transaction data for tx: {}", tx_hash); + None + }); + let gas_used = match tx_receipt { + Some(r) => r.gas_used, + None => { + warn!("failed to fetch transaction receipt for tx: {}", tx_hash); + None + } + }; + Ok((gas_limit, gas_used)) + } +} + +#[async_trait] +impl TransactionTracker for TransactionTrackerImpl +where + P: Provider, + T: TransactionSender, +{ + fn get_nonce_and_required_fees(&self) -> TransactionTrackerResult<(U256, Option)> { let gas_fees = if self.has_dropped { None } else { @@ -214,24 +234,17 @@ where .increase_by_percent(self.settings.replacement_fee_percent_increase) }) }; - (self.nonce, gas_fees) + Ok((self.nonce, gas_fees)) } async fn send_transaction( &mut self, tx: TypedTransaction, expected_storage: &ExpectedStorage, - ) -> anyhow::Result { + ) -> TransactionTrackerResult { self.validate_transaction(&tx)?; let gas_fees = GasFees::from(&tx); - let send_result = self.sender.send_transaction(tx, expected_storage).await; - let sent_tx = match send_result { - Ok(sent_tx) => sent_tx, - Err(error) => { - let tracker_update = self.handle_send_error(error).await?; - return Ok(SendResult::TrackerUpdate(tracker_update)); - } - }; + let sent_tx = self.sender.send_transaction(tx, expected_storage).await?; info!( "Sent transaction {:?} nonce: {:?}", sent_tx.tx_hash, sent_tx.nonce @@ 
-244,61 +257,18 @@ where self.has_dropped = false; self.attempt_count += 1; self.update_metrics(); - Ok(SendResult::TxHash(sent_tx.tx_hash)) + Ok(sent_tx.tx_hash) } - /// When we fail to send a transaction, it may be because another - /// transaction has mined before it could be sent, invalidating the nonce. - /// Thus, do one last check for an update before returning the error. - async fn handle_send_error(&mut self, error: TxSenderError) -> anyhow::Result { - match &error { - TxSenderError::ReplacementUnderpriced => { - return Ok(TrackerUpdate::ReplacementUnderpriced) - } - TxSenderError::Other(_error) => {} - } - - let update = self.check_for_update_now().await?; - let Some(update) = update else { - return Err(error.into()); - }; - match &update { - TrackerUpdate::StillPendingAfterWait | TrackerUpdate::LatestTxDropped { .. } => { - Err(error.into()) - } - _ => Ok(update), - } - } - - async fn wait_for_update(&mut self) -> anyhow::Result { - let start_block_number = self - .provider - .get_block_number() - .await - .context("tracker should get starting block when waiting for update")?; - let end_block_number = start_block_number + self.settings.max_blocks_to_wait_for_mine; - loop { - let update = self.check_for_update_now().await?; - if let Some(update) = update { - return Ok(update); - } - let current_block_number = self - .provider - .get_block_number() - .await - .context("tracker should get current block when polling for updates")?; - if end_block_number <= current_block_number { - return Ok(TrackerUpdate::StillPendingAfterWait); - } - time::sleep(self.settings.poll_interval).await; - } - } - - async fn check_for_update_now(&mut self) -> anyhow::Result> { + async fn check_for_update(&mut self) -> TransactionTrackerResult> { let external_nonce = self.get_external_nonce().await?; if self.nonce < external_nonce { // The nonce has changed. Check to see which of our transactions has // mined, if any. 
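// An illustrative sketch, not part of the diff: the scan that follows boils
// down to "return the newest tracked hash that the node reports as mined".
// `first_mined` and `statuses` are hypothetical names; `TxStatus` and `H256`
// are the types already in scope in this module.
fn first_mined(statuses: &[(H256, TxStatus)]) -> Option<H256> {
    statuses.iter().find_map(|(hash, status)| match status {
        TxStatus::Mined { .. } => Some(*hash),
        _ => None,
    })
}
// If no tracked hash mined even though the on-chain nonce advanced, a
// transaction from the same signer mined outside the tracker, which maps to
// `TrackerUpdate::NonceUsedForOtherTx` below.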
+ debug!( + "Nonce has changed from {:?} to {:?}", + self.nonce, external_nonce + ); let mut out = TrackerUpdate::NonceUsedForOtherTx { nonce: self.nonce }; for tx in self.transactions.iter().rev() { @@ -307,6 +277,7 @@ where .get_transaction_status(tx.tx_hash) .await .context("tracker should check transaction status when the nonce changes")?; + info!("Status of tx {:?}: {:?}", tx.tx_hash, status); if let TxStatus::Mined { block_number } = status { let (gas_limit, gas_used) = self.get_mined_tx_gas_info(tx.tx_hash).await?; out = TrackerUpdate::Mined { @@ -361,74 +332,21 @@ where }) } - fn set_nonce_and_clear_state(&mut self, nonce: U256) { - self.nonce = nonce; - self.transactions.clear(); - self.has_dropped = false; - self.attempt_count = 0; - self.update_metrics(); - } - - async fn get_external_nonce(&self) -> anyhow::Result { - self.provider - .get_transaction_count(self.sender.address()) - .await - .context("tracker should load current nonce from provider") + async fn reset(&mut self) { + let nonce = self.get_external_nonce().await.unwrap_or(self.nonce); + self.set_nonce_and_clear_state(nonce); } +} - fn validate_transaction(&self, tx: &TypedTransaction) -> anyhow::Result<()> { - let Some(&nonce) = tx.nonce() else { - bail!("transaction given to tracker should have nonce set"); - }; - let gas_fees = GasFees::from(tx); - let (required_nonce, required_gas_fees) = self.get_nonce_and_required_fees(); - if nonce != required_nonce { - bail!("tried to send transaction with nonce {nonce}, but should match tracker's nonce of {required_nonce}"); - } - if let Some(required_gas_fees) = required_gas_fees { - if gas_fees.max_fee_per_gas < required_gas_fees.max_fee_per_gas - || gas_fees.max_priority_fee_per_gas < required_gas_fees.max_priority_fee_per_gas - { - bail!("new transaction's gas fees should be at least the required fees") +impl From for TransactionTrackerError { + fn from(value: TxSenderError) -> Self { + match value { + TxSenderError::NonceTooLow => TransactionTrackerError::NonceTooLow, + TxSenderError::ReplacementUnderpriced => { + TransactionTrackerError::ReplacementUnderpriced } + TxSenderError::Other(e) => TransactionTrackerError::Other(e), } - Ok(()) - } - - fn update_metrics(&self) { - TransactionTrackerMetrics::set_num_pending_transactions( - self.builder_index, - self.transactions.len(), - ); - TransactionTrackerMetrics::set_nonce(self.builder_index, self.nonce); - TransactionTrackerMetrics::set_attempt_count(self.builder_index, self.attempt_count); - if let Some(tx) = self.transactions.last() { - TransactionTrackerMetrics::set_current_fees(self.builder_index, Some(tx.gas_fees)); - } else { - TransactionTrackerMetrics::set_current_fees(self.builder_index, None); - } - } - - async fn get_mined_tx_gas_info( - &self, - tx_hash: H256, - ) -> anyhow::Result<(Option, Option)> { - let (tx, tx_receipt) = tokio::try_join!( - self.provider.get_transaction(tx_hash), - self.provider.get_transaction_receipt(tx_hash), - )?; - let gas_limit = tx.map(|t| t.gas).or_else(|| { - warn!("failed to fetch transaction data for tx: {}", tx_hash); - None - }); - let gas_used = match tx_receipt { - Some(r) => r.gas_used, - None => { - warn!("failed to fetch transaction receipt for tx: {}", tx_hash); - None - } - }; - Ok((gas_limit, gas_used)) } } @@ -482,8 +400,6 @@ mod tests { provider: MockProvider, ) -> TransactionTrackerImpl { let settings = Settings { - poll_interval: Duration::from_secs(0), - max_blocks_to_wait_for_mine: 3, replacement_fee_percent_increase: 5, }; @@ -512,7 +428,7 @@ mod tests { 
.expect_get_transaction_count() .returning(move |_a| Ok(U256::from(0))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new() .nonce(0) @@ -598,7 +514,7 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(2))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new(); let exp = ExpectedStorage::default(); @@ -625,7 +541,7 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(2))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new().nonce(0); let exp = ExpectedStorage::default(); @@ -651,41 +567,11 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(0))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new().nonce(0); let exp = ExpectedStorage::default(); - let sent_transaction = tracker.send_transaction(tx.into(), &exp).await.unwrap(); - - assert!(matches!(sent_transaction, SendResult::TxHash(..))); - } - - #[tokio::test] - async fn test_wait_for_update_still_pending() { - let (mut sender, mut provider) = create_base_config(); - sender.expect_address().return_const(Address::zero()); - - let mut s = Sequence::new(); - - provider - .expect_get_transaction_count() - .returning(move |_a| Ok(U256::from(0))); - - for block_number in 1..=4 { - provider - .expect_get_block_number() - .returning(move || Ok(block_number)) - .times(1) - .in_sequence(&mut s); - } - - let tracker = create_tracker(sender, provider).await; - let tracker_update = tracker.wait_for_update().await.unwrap(); - - assert!(matches!( - tracker_update, - TrackerUpdate::StillPendingAfterWait - )); + tracker.send_transaction(tx.into(), &exp).await.unwrap(); } // TODO(#295): fix dropped status @@ -727,7 +613,7 @@ mod tests { // } #[tokio::test] - async fn test_wait_for_update_nonce_used() { + async fn test_check_for_update_nonce_used() { let (mut sender, mut provider) = create_base_config(); sender.expect_address().return_const(Address::zero()); @@ -740,14 +626,9 @@ mod tests { .in_sequence(&mut provider_seq); } - provider - .expect_get_block_number() - .returning(move || Ok(1)) - .times(1); + let mut tracker = create_tracker(sender, provider).await; - let tracker = create_tracker(sender, provider).await; - - let tracker_update = tracker.wait_for_update().await.unwrap(); + let tracker_update = tracker.check_for_update().await.unwrap().unwrap(); assert!(matches!( tracker_update, @@ -756,7 +637,7 @@ mod tests { } #[tokio::test] - async fn test_wait_for_update_mined() { + async fn test_check_for_update_mined() { let (mut sender, mut provider) = create_base_config(); sender.expect_address().return_const(Address::zero()); sender @@ -776,11 +657,6 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(0))); - provider - .expect_get_block_number() - .returning(move || Ok(1)) - .times(1); - provider.expect_get_transaction().returning(|_: H256| { Ok(Some(Transaction { gas: U256::from(0), @@ -797,14 +673,14 @@ mod tests { })) }); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new().nonce(0); let exp = 
ExpectedStorage::default(); // send dummy transaction let _sent = tracker.send_transaction(tx.into(), &exp).await; - let tracker_update = tracker.wait_for_update().await.unwrap(); + let tracker_update = tracker.check_for_update().await.unwrap().unwrap(); assert!(matches!(tracker_update, TrackerUpdate::Mined { .. })); } From ab1cf50f92f36e318146cd79585199859b0ce72e Mon Sep 17 00:00:00 2001 From: dancoombs Date: Tue, 11 Jun 2024 11:56:55 -0500 Subject: [PATCH 05/17] feat(builder): cancel transaction when replacement underpriced --- crates/builder/src/bundle_proposer.rs | 21 +- crates/builder/src/bundle_sender.rs | 276 +++++++++++++++------- crates/builder/src/sender/bloxroute.rs | 30 ++- crates/builder/src/sender/conditional.rs | 31 ++- crates/builder/src/sender/flashbots.rs | 99 ++++++-- crates/builder/src/sender/mod.rs | 40 +++- crates/builder/src/sender/raw.rs | 32 ++- crates/builder/src/transaction_tracker.rs | 63 ++++- 8 files changed, 480 insertions(+), 112 deletions(-) diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 49375116b..d006a395f 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -96,11 +96,21 @@ impl Bundle { pub(crate) trait BundleProposer: Send + Sync + 'static { type UO: UserOperation; + /// Constructs the next bundle + /// + /// If `min_fees` is `Some`, the proposer will ensure the bundle has + /// at least `min_fees`. async fn make_bundle( &self, - required_fees: Option, + min_fees: Option, is_replacement: bool, ) -> anyhow::Result>; + + /// Gets the current gas fees + /// + /// If `min_fees` is `Some`, the proposer will ensure the gas fees returned are at least `min_fees`. + async fn estimate_gas_fees(&self, min_fees: Option) + -> anyhow::Result<(GasFees, U256)>; } #[derive(Debug)] @@ -138,6 +148,13 @@ where { type UO = UO; + async fn estimate_gas_fees( + &self, + required_fees: Option, + ) -> anyhow::Result<(GasFees, U256)> { + self.fee_estimator.required_bundle_fees(required_fees).await + } + async fn make_bundle( &self, required_fees: Option, @@ -148,7 +165,7 @@ where self.provider .get_latest_block_hash_and_number() .map_err(anyhow::Error::from), - self.fee_estimator.required_bundle_fees(required_fees) + self.estimate_gas_fees(required_fees) )?; if ops.is_empty() { return Ok(Bundle::default()); diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index cae080929..67375f0c5 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -132,6 +132,12 @@ where // Waiting for a bundle to be mined // (wait_until_block, fee_increase_count) Pending(u64, u64), + // Cancelling the last transaction + // (fee_increase_count) + Cancelling(u64), + // Waiting for a cancellation transaction to be mined + // (wait_until_block, fee_increase_count) + CancelPending(u64, u64), } // initial state @@ -181,39 +187,40 @@ where debug!("No operations in bundle"); if fee_increase_count > 0 { - // TODO(danc): when abandoning we tend to get "stuck" where we have a pending bundle - // transaction that isn't landing. We should move to a new state where we track the - // pending transaction and "cancel" it if it doesn't land after a certain number of blocks. 
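// An illustrative sketch, not part of the diff: the decision this patch adds
// around a pending bundle transaction. The names are hypothetical stand-ins;
// the real transitions live in the Building/Pending/Cancelling/CancelPending
// states of the sender loop.
enum NextStep {
    KeepWaiting,
    RebuildWithHigherFees,
    CancelPendingTx,
}

fn next_step(blocks_waited: u64, max_blocks: u64, replacement_underpriced: bool) -> NextStep {
    if replacement_underpriced {
        // A replacement was rejected as underpriced: the node still holds an
        // old transaction, so switch to cancelling it rather than bidding
        // against it indefinitely.
        NextStep::CancelPendingTx
    } else if blocks_waited >= max_blocks {
        NextStep::RebuildWithHigherFees
    } else {
        NextStep::KeepWaiting
    }
}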
- - warn!("Abandoning bundle after fee increases {fee_increase_count}, no operations available, waiting for next trigger"); + warn!("Abandoning bundle after fee increases {fee_increase_count}, no operations available"); BuilderMetrics::increment_bundle_txns_abandoned( self.builder_index, self.entry_point.address(), ); - self.transaction_tracker.reset().await; send_bundle_result = Some(SendBundleResult::NoOperationsAfterFeeIncreases { attempt_number: fee_increase_count, - }) + }); + + // abandon the bundle by resetting the tracker and starting a new bundle process + // If the node we are using still has the transaction in the mempool, its + // possible we will get a `ReplacementUnderpriced` on the next iteration + // and will start a cancellation. + self.transaction_tracker.reset().await; + state = State::Building(true, 0); } else { debug!("No operations available, waiting for next trigger"); send_bundle_result = Some(SendBundleResult::NoOperationsInitially); + state = State::Building(true, 0); } - - state = State::Building(true, 0); } Ok(SendBundleAttemptResult::NonceTooLow) => { // reset the transaction tracker and try again + info!("Nonce too low, starting new bundle attempt"); self.transaction_tracker.reset().await; state = State::Building(true, 0); } Ok(SendBundleAttemptResult::ReplacementUnderpriced) => { - // TODO(danc): handle replacement underpriced - // move to the cancellation state - - // for now: reset the transaction tracker and try again + info!( + "Replacement transaction underpriced, entering cancellation loop" + ); self.transaction_tracker.reset().await; - state = State::Building(true, 0); + state = State::Cancelling(0); } Err(error) => { error!("Bundle send error {error:?}"); @@ -236,11 +243,26 @@ where TrackerUpdate::Mined { block_number, attempt_number, + gas_limit, + gas_used, tx_hash, + nonce, .. } => { // mined! info!("Bundle transaction mined"); + BuilderMetrics::process_bundle_txn_success( + self.builder_index, + self.entry_point.address(), + gas_limit, + gas_used, + ); + self.emit(BuilderEvent::transaction_mined( + self.builder_index, + tx_hash, + nonce.low_u64(), + block_number, + )); send_bundle_result = Some(SendBundleResult::Success { block_number, attempt_number, @@ -248,46 +270,156 @@ where }); state = State::Building(true, 0); } - TrackerUpdate::LatestTxDropped { .. } => { + TrackerUpdate::LatestTxDropped { nonce } => { // try again, don't wait for trigger, re-estimate fees info!("Latest transaction dropped, starting new bundle attempt"); + self.emit(BuilderEvent::latest_transaction_dropped( + self.builder_index, + nonce.low_u64(), + )); + BuilderMetrics::increment_bundle_txns_dropped( + self.builder_index, + self.entry_point.address(), + ); // force reset the transaction tracker self.transaction_tracker.reset().await; - state = State::Building(true, 0); } - TrackerUpdate::NonceUsedForOtherTx { .. 
} => { + TrackerUpdate::NonceUsedForOtherTx { nonce } => { // try again, don't wait for trigger, re-estimate fees info!("Nonce used externally, starting new bundle attempt"); + self.emit(BuilderEvent::nonce_used_for_other_transaction( + self.builder_index, + nonce.low_u64(), + )); + BuilderMetrics::increment_bundle_txns_nonce_used( + self.builder_index, + self.entry_point.address(), + ); state = State::Building(true, 0); } } } else if sender_trigger.last_block().block_number >= until { - if fee_increase_count >= self.settings.max_fee_increases { - // TODO(danc): same "stuck" issue here on abandonment - // this abandon is likely to lead to "transaction underpriced" errors - // Instead, move to cancellation state. + // start replacement, don't wait for trigger. Continue + // to attempt until there are no longer any UOs priced high enough + // to bundle. + info!( + "Not mined after {} blocks, increasing fees, attempt: {}", + self.settings.max_blocks_to_wait_for_mine, + fee_increase_count + 1 + ); + BuilderMetrics::increment_bundle_txn_fee_increases( + self.builder_index, + self.entry_point.address(), + ); + state = State::Building(false, fee_increase_count + 1); + } + } + State::Cancelling(fee_increase_count) => { + // cancel the transaction + info!("Cancelling last transaction"); + + let (estimated_fees, _) = self + .proposer + .estimate_gas_fees(None) + .await + .unwrap_or_default(); + + let cancel_res = self + .transaction_tracker + .cancel_transaction(self.entry_point.address(), estimated_fees) + .await; + + match cancel_res { + Ok(Some(_)) => { + info!("Cancellation transaction sent, waiting for confirmation"); + BuilderMetrics::increment_cancellation_txns_sent( + self.builder_index, + self.entry_point.address(), + ); + + state = State::CancelPending( + sender_trigger.last_block.block_number + + self.settings.max_blocks_to_wait_for_mine, + fee_increase_count, + ); + } + Ok(None) => { + info!("Soft cancellation or no transaction to cancel, starting new bundle attempt"); + BuilderMetrics::increment_soft_cancellations( + self.builder_index, + self.entry_point.address(), + ); - warn!("Abandoning bundle after max fee increases {fee_increase_count}"); - BuilderMetrics::increment_bundle_txns_abandoned( + state = State::Building(true, 0); + } + Err(TransactionTrackerError::ReplacementUnderpriced) => { + info!("Replacement transaction underpriced during cancellation, trying again"); + state = State::Cancelling(fee_increase_count + 1); + } + Err(TransactionTrackerError::NonceTooLow) => { + // reset the transaction tracker and try again + info!("Nonce too low during cancellation, starting new bundle attempt"); + self.transaction_tracker.reset().await; + state = State::Building(true, 0); + } + Err(e) => { + error!("Failed to cancel transaction, moving back to building state: {e:#?}"); + BuilderMetrics::increment_cancellation_txns_failed( self.builder_index, self.entry_point.address(), ); + state = State::Building(true, 0); + } + } + } + State::CancelPending(until, fee_increase_count) => { + sender_trigger.wait_for_block().await?; + + // check for transaction update + if let Some(update) = self.check_for_transaction_update().await { + match update { + TrackerUpdate::Mined { .. } => { + // mined + info!("Cancellation transaction mined"); + BuilderMetrics::increment_cancellation_txns_mined( + self.builder_index, + self.entry_point.address(), + ); + } + TrackerUpdate::LatestTxDropped { .. 
} => { + // If a cancellation gets dropped, move to bundling state as there is no + // longer a pending transaction + info!( + "Cancellation transaction dropped, starting new bundle attempt" + ); + // force reset the transaction tracker + self.transaction_tracker.reset().await; + } + TrackerUpdate::NonceUsedForOtherTx { .. } => { + // If a nonce is used externally, move to bundling state as there is no longer + // a pending transaction + info!("Nonce used externally while cancelling, starting new bundle attempt"); + } + } + + state = State::Building(true, 0); + } else if sender_trigger.last_block().block_number >= until { + if fee_increase_count >= self.settings.max_fee_increases { + // abandon the cancellation + warn!("Abandoning cancellation after max fee increases {fee_increase_count}, starting new bundle attempt"); + // force reset the transaction tracker self.transaction_tracker.reset().await; state = State::Building(true, 0); } else { // start replacement, don't wait for trigger info!( - "Not mined after {} blocks, increasing fees, attempt: {}", + "Cancellation not mined after {} blocks, increasing fees, attempt: {}", self.settings.max_blocks_to_wait_for_mine, fee_increase_count + 1 ); - BuilderMetrics::increment_bundle_txn_fee_increases( - self.builder_index, - self.entry_point.address(), - ); - state = State::Building(false, fee_increase_count + 1); + state = State::Cancelling(fee_increase_count + 1); } } } @@ -351,60 +483,25 @@ where } }; - // process update before returning match update { TrackerUpdate::Mined { tx_hash, block_number, attempt_number, - gas_limit, - gas_used, nonce, .. } => { - BuilderMetrics::increment_bundle_txns_success( - self.builder_index, - self.entry_point.address(), - ); - BuilderMetrics::set_bundle_gas_stats( - gas_limit, - gas_used, - self.builder_index, - self.entry_point.address(), - ); if attempt_number == 0 { - info!("Bundle with hash {tx_hash:?} landed in block {block_number}"); + info!("Transaction with hash {tx_hash:?}, nonce {nonce:?}, landed in block {block_number}"); } else { - info!("Bundle with hash {tx_hash:?} landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); + info!("Transaction with hash {tx_hash:?}, nonce {nonce:?}, landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); } - self.emit(BuilderEvent::transaction_mined( - self.builder_index, - tx_hash, - nonce.low_u64(), - block_number, - )); } TrackerUpdate::LatestTxDropped { nonce } => { - self.emit(BuilderEvent::latest_transaction_dropped( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_dropped( - self.builder_index, - self.entry_point.address(), - ); - info!("Previous transaction dropped by sender"); + info!("Previous transaction dropped by sender. Nonce: {nonce:?}"); } TrackerUpdate::NonceUsedForOtherTx { nonce } => { - self.emit(BuilderEvent::nonce_used_for_other_transaction( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_nonce_used( - self.builder_index, - self.entry_point.address(), - ); - info!("Nonce used by external transaction"); + info!("Nonce used by external transaction. 
Nonce: {nonce:?}"); } }; @@ -697,7 +794,8 @@ impl BundleSenderTrigger { } async fn wait_for_block(&mut self) -> anyhow::Result { - self.block_rx + self.last_block = self + .block_rx .recv() .await .ok_or_else(|| anyhow::anyhow!("Block stream closed"))?; @@ -736,8 +834,20 @@ impl BuilderMetrics { .increment(1); } - fn increment_bundle_txns_success(builder_index: u64, entry_point: Address) { + fn process_bundle_txn_success( + builder_index: u64, + entry_point: Address, + gas_limit: Option, + gas_used: Option, + ) { metrics::counter!("builder_bundle_txns_success", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + + if let Some(limit) = gas_limit { + metrics::counter!("builder_bundle_gas_limit", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(limit.as_u64()); + } + if let Some(used) = gas_used { + metrics::counter!("builder_bundle_gas_used", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(used.as_u64()); + } } fn increment_bundle_txns_dropped(builder_index: u64, entry_point: Address) { @@ -766,17 +876,19 @@ impl BuilderMetrics { metrics::counter!("builder_bundle_replacement_underpriced", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); } - fn set_bundle_gas_stats( - gas_limit: Option, - gas_used: Option, - builder_index: u64, - entry_point: Address, - ) { - if let Some(limit) = gas_limit { - metrics::counter!("builder_bundle_gas_limit", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(limit.as_u64()); - } - if let Some(used) = gas_used { - metrics::counter!("builder_bundle_gas_used", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(used.as_u64()); - } + fn increment_cancellation_txns_sent(builder_index: u64, entry_point: Address) { + metrics::counter!("builder_cancellation_txns_sent", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + } + + fn increment_cancellation_txns_mined(builder_index: u64, entry_point: Address) { + metrics::counter!("builder_cancellation_txns_mined", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + } + + fn increment_soft_cancellations(builder_index: u64, entry_point: Address) { + metrics::counter!("builder_soft_cancellations", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + } + + fn increment_cancellation_txns_failed(builder_index: u64, entry_point: Address) { + metrics::counter!("builder_cancellation_txns_failed", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); } } diff --git a/crates/builder/src/sender/bloxroute.rs b/crates/builder/src/sender/bloxroute.rs index b23ea7a92..66a8807b3 100644 --- a/crates/builder/src/sender/bloxroute.rs +++ b/crates/builder/src/sender/bloxroute.rs @@ -19,6 +19,7 @@ use ethers::{ providers::{JsonRpcClient, Middleware, Provider}, types::{ transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, TxHash, H256, + U256, }, utils::hex, }; @@ -28,12 +29,16 @@ use jsonrpsee::{ http_client::{transport::HttpBackend, HeaderMap, HeaderValue, HttpClient, HttpClientBuilder}, }; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue; use 
tokio::time; use tonic::async_trait; -use super::{fill_and_sign, Result, SentTxInfo, TransactionSender, TxStatus}; +use super::{ + create_hard_cancel_tx, fill_and_sign, CancelTxInfo, Result, SentTxInfo, TransactionSender, + TxStatus, +}; pub(crate) struct PolygonBloxrouteTransactionSender where @@ -62,6 +67,29 @@ where Ok(SentTxInfo { nonce, tx_hash }) } + async fn cancel_transaction( + &self, + _tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result { + let tx = create_hard_cancel_tx(self.provider.address(), to, nonce, gas_fees); + + let (raw_tx, _) = fill_and_sign(&self.provider, tx).await?; + + let tx_hash = self + .provider + .provider() + .request("eth_sendRawTransaction", (raw_tx,)) + .await?; + + Ok(CancelTxInfo { + tx_hash, + soft_cancelled: false, + }) + } + async fn get_transaction_status(&self, tx_hash: H256) -> Result { let tx = self .provider diff --git a/crates/builder/src/sender/conditional.rs b/crates/builder/src/sender/conditional.rs index e02f78a68..5e1ca3e35 100644 --- a/crates/builder/src/sender/conditional.rs +++ b/crates/builder/src/sender/conditional.rs @@ -17,14 +17,18 @@ use anyhow::Context; use ethers::{ middleware::SignerMiddleware, providers::{JsonRpcClient, Middleware, PendingTransaction, Provider}, - types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256}, + types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256, U256}, }; use ethers_signers::Signer; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; use serde_json::json; use tonic::async_trait; -use super::{fill_and_sign, Result, SentTxInfo, TransactionSender, TxStatus}; +use super::{ + create_hard_cancel_tx, fill_and_sign, CancelTxInfo, Result, SentTxInfo, TransactionSender, + TxStatus, +}; pub(crate) struct ConditionalTransactionSender where @@ -62,6 +66,29 @@ where Ok(SentTxInfo { nonce, tx_hash }) } + async fn cancel_transaction( + &self, + _tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result { + let tx = create_hard_cancel_tx(self.provider.address(), to, nonce, gas_fees); + + let (raw_tx, _) = fill_and_sign(&self.provider, tx).await?; + + let tx_hash = self + .provider + .provider() + .request("eth_sendRawTransaction", (raw_tx,)) + .await?; + + Ok(CancelTxInfo { + tx_hash, + soft_cancelled: false, + }) + } + async fn get_transaction_status(&self, tx_hash: H256) -> Result { let tx = self .provider diff --git a/crates/builder/src/sender/flashbots.rs b/crates/builder/src/sender/flashbots.rs index bad8c6bf3..eecaf1144 100644 --- a/crates/builder/src/sender/flashbots.rs +++ b/crates/builder/src/sender/flashbots.rs @@ -26,8 +26,7 @@ use ethers::{ middleware::SignerMiddleware, providers::{interval, JsonRpcClient, Middleware, Provider}, types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, TxHash, H256, - U256, U64, + transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, H256, U256, U64, }, utils, }; @@ -37,8 +36,9 @@ use futures_util::{Stream, StreamExt, TryFutureExt}; use pin_project::pin_project; use reqwest::{ header::{HeaderMap, HeaderValue, CONTENT_TYPE}, - Client, + Client, Response, }; +use rundler_types::GasFees; use serde::{de, Deserialize, Serialize}; use serde_json::{json, Value}; use tonic::async_trait; @@ -46,6 +46,7 @@ use tonic::async_trait; use super::{ fill_and_sign, ExpectedStorage, Result, SentTxInfo, TransactionSender, TxSenderError, TxStatus, }; +use crate::sender::CancelTxInfo; #[derive(Debug)] pub(crate) struct 
FlashbotsTransactionSender { @@ -75,6 +76,28 @@ where Ok(SentTxInfo { nonce, tx_hash }) } + async fn cancel_transaction( + &self, + tx_hash: H256, + _nonce: U256, + _to: Address, + _gas_fees: GasFees, + ) -> Result { + let success = self + .flashbots_client + .cancel_private_transaction(tx_hash) + .await?; + + if !success { + return Err(TxSenderError::SoftCancelFailed); + } + + Ok(CancelTxInfo { + tx_hash: H256::zero(), + soft_cancelled: true, + }) + } + async fn get_transaction_status(&self, tx_hash: H256) -> Result { let status = self.flashbots_client.status(tx_hash).await?; Ok(match status.status { @@ -181,13 +204,31 @@ struct Refund { #[derive(Serialize, Debug)] #[serde(rename_all = "camelCase")] -struct FlashbotsPrivateTransaction { +struct FlashbotsSendPrivateTransactionRequest { tx: Bytes, #[serde(skip_serializing_if = "Option::is_none")] max_block_number: Option, preferences: Preferences, } +#[derive(Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsSendPrivateTransactionResponse { + result: H256, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsCancelPrivateTransactionRequest { + tx_hash: H256, +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsCancelPrivateTransactionResponse { + result: bool, +} + #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] #[allow(dead_code)] @@ -277,7 +318,7 @@ where "jsonrpc": "2.0", "method": "eth_sendPrivateTransaction", "params": [ - FlashbotsPrivateTransaction { + FlashbotsSendPrivateTransactionRequest { tx: raw_tx, max_block_number: None, preferences, @@ -285,6 +326,37 @@ where "id": 1 }); + let response = self.sign_send_request(body).await?; + + let parsed_response = response + .json::() + .await + .map_err(|e| anyhow!("failed to deserialize Flashbots response: {:?}", e))?; + + Ok(parsed_response.result) + } + + async fn cancel_private_transaction(&self, tx_hash: H256) -> anyhow::Result { + let body = json!({ + "jsonrpc": "2.0", + "method": "eth_cancelPrivateTransaction", + "params": [ + FlashbotsCancelPrivateTransactionRequest { tx_hash } + ], + "id": 1 + }); + + let response = self.sign_send_request(body).await?; + + let parsed_response = response + .json::() + .await + .map_err(|e| anyhow!("failed to deserialize Flashbots response: {:?}", e))?; + + Ok(parsed_response.result) + } + + async fn sign_send_request(&self, body: Value) -> anyhow::Result { let signature = self .signer .sign_message(format!( @@ -302,29 +374,16 @@ where headers.insert("x-flashbots-signature", header_val); // Send the request - let response = self - .http_client + self.http_client .post(&self.relay_url) .headers(headers) .body(body.to_string()) .send() .await - .map_err(|e| anyhow!("failed to send transaction to Flashbots: {:?}", e))?; - - let parsed_response = response - .json::() - .await - .map_err(|e| anyhow!("failed to deserialize Flashbots response: {:?}", e))?; - - Ok(parsed_response.result) + .map_err(|e| anyhow!("failed to send request to Flashbots: {:?}", e)) } } -#[derive(Deserialize, Debug)] -struct FlashbotsResponse { - result: TxHash, -} - type PinBoxFut<'a, T> = Pin> + Send + 'a>>; enum PendingFlashbotsTxState<'a> { diff --git a/crates/builder/src/sender/mod.rs b/crates/builder/src/sender/mod.rs index a83d0a296..df4214a9a 100644 --- a/crates/builder/src/sender/mod.rs +++ b/crates/builder/src/sender/mod.rs @@ -26,7 +26,8 @@ use ethers::{ prelude::SignerMiddleware, providers::{JsonRpcClient, Middleware, Provider, ProviderError}, types::{ - 
transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, H256, U256, + transaction::eip2718::TypedTransaction, Address, Bytes, Eip1559TransactionRequest, + TransactionReceipt, H256, U256, }, }; use ethers_signers::{LocalWallet, Signer}; @@ -35,12 +36,22 @@ pub(crate) use flashbots::FlashbotsTransactionSender; use mockall::automock; pub(crate) use raw::RawTransactionSender; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; + #[derive(Debug)] pub(crate) struct SentTxInfo { pub(crate) nonce: U256, pub(crate) tx_hash: H256, } +#[derive(Debug)] +pub(crate) struct CancelTxInfo { + pub(crate) tx_hash: H256, + // True if the transaction was soft-cancelled. Soft-cancellation is when the RPC endpoint + // accepts the cancel without an onchain transaction. + pub(crate) soft_cancelled: bool, +} + #[derive(Debug)] pub(crate) enum TxStatus { Pending, @@ -57,6 +68,9 @@ pub(crate) enum TxSenderError { /// Nonce too low #[error("nonce too low")] NonceTooLow, + /// Soft cancellation failed + #[error("soft cancel failed")] + SoftCancelFailed, /// All other errors #[error(transparent)] Other(#[from] anyhow::Error), @@ -74,6 +88,14 @@ pub(crate) trait TransactionSender: Send + Sync + 'static { expected_storage: &ExpectedStorage, ) -> Result; + async fn cancel_transaction( + &self, + tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result; + async fn get_transaction_status(&self, tx_hash: H256) -> Result; async fn wait_until_mined(&self, tx_hash: H256) -> Result>; @@ -213,6 +235,22 @@ where Ok((tx.rlp_signed(&signature), nonce)) } +fn create_hard_cancel_tx( + from: Address, + to: Address, + nonce: U256, + gas_fees: GasFees, +) -> TypedTransaction { + Eip1559TransactionRequest::new() + .from(from) + .to(to) + .nonce(nonce) + .max_fee_per_gas(gas_fees.max_fee_per_gas) + .max_priority_fee_per_gas(gas_fees.max_priority_fee_per_gas) + .data(Bytes::new()) + .into() +} + impl From for TxSenderError { fn from(value: ProviderError) -> Self { match &value { diff --git a/crates/builder/src/sender/raw.rs b/crates/builder/src/sender/raw.rs index afaacab95..4495606e4 100644 --- a/crates/builder/src/sender/raw.rs +++ b/crates/builder/src/sender/raw.rs @@ -18,13 +18,16 @@ use async_trait::async_trait; use ethers::{ middleware::SignerMiddleware, providers::{JsonRpcClient, Middleware, PendingTransaction, Provider}, - types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256}, + types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256, U256}, }; use ethers_signers::Signer; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; -use super::Result; -use crate::sender::{fill_and_sign, SentTxInfo, TransactionSender, TxStatus}; +use super::{CancelTxInfo, Result}; +use crate::sender::{ + create_hard_cancel_tx, fill_and_sign, SentTxInfo, TransactionSender, TxStatus, +}; #[derive(Debug)] pub(crate) struct RawTransactionSender @@ -59,6 +62,29 @@ where Ok(SentTxInfo { nonce, tx_hash }) } + async fn cancel_transaction( + &self, + _tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result { + let tx = create_hard_cancel_tx(self.provider.address(), to, nonce, gas_fees); + + let (raw_tx, _) = fill_and_sign(&self.provider, tx).await?; + + let tx_hash = self + .provider + .provider() + .request("eth_sendRawTransaction", (raw_tx,)) + .await?; + + Ok(CancelTxInfo { + tx_hash, + soft_cancelled: false, + }) + } + async fn get_transaction_status(&self, tx_hash: H256) -> Result { let tx = self .provider diff --git 
a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index b5a452615..775ec0905 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use anyhow::{bail, Context}; use async_trait::async_trait; -use ethers::types::{transaction::eip2718::TypedTransaction, H256, U256}; +use ethers::types::{transaction::eip2718::TypedTransaction, Address, H256, U256}; use rundler_provider::Provider; use rundler_sim::ExpectedStorage; use rundler_types::GasFees; @@ -47,6 +47,16 @@ pub(crate) trait TransactionTracker: Send + Sync + 'static { expected_stroage: &ExpectedStorage, ) -> TransactionTrackerResult; + /// Cancel the latest transaction in the tracker. + /// + /// Returns: An option containing the hash of the transaction that was used to cancel. If the option + /// is empty, then either no transaction was cancelled or the cancellation was a "soft-cancel." + async fn cancel_transaction( + &mut self, + to: Address, + estimated_fees: GasFees, + ) -> TransactionTrackerResult>; + /// Checks: /// 1. One of our transactions mines (not necessarily the one just sent). /// 2. All our send transactions have dropped. @@ -260,6 +270,54 @@ where Ok(sent_tx.tx_hash) } + async fn cancel_transaction( + &mut self, + to: Address, + estimated_fees: GasFees, + ) -> TransactionTrackerResult> { + let (tx_hash, gas_fees) = match self.transactions.last() { + Some(tx) => { + let increased_fees = tx + .gas_fees + .increase_by_percent(self.settings.replacement_fee_percent_increase); + let gas_fees = GasFees { + max_fee_per_gas: increased_fees + .max_fee_per_gas + .max(estimated_fees.max_fee_per_gas), + max_priority_fee_per_gas: increased_fees + .max_priority_fee_per_gas + .max(estimated_fees.max_priority_fee_per_gas), + }; + (tx.tx_hash, gas_fees) + } + None => (H256::zero(), estimated_fees), + }; + + let cancel_info = self + .sender + .cancel_transaction(tx_hash, self.nonce, to, gas_fees) + .await?; + + if cancel_info.soft_cancelled { + // If the transaction was soft-cancelled. Reset internal state. 
+ self.reset().await; + return Ok(None); + } + + info!("Sent cancellation tx {:?}", cancel_info.tx_hash); + + self.transactions.push(PendingTransaction { + tx_hash: cancel_info.tx_hash, + gas_fees, + attempt_number: self.attempt_count, + }); + + self.has_dropped = false; + self.attempt_count += 1; + self.update_metrics(); + Ok(Some(cancel_info.tx_hash)) + } + async fn check_for_update(&mut self) -> TransactionTrackerResult> { let external_nonce = self.get_external_nonce().await?; if self.nonce < external_nonce { @@ -345,6 +403,9 @@ impl From for TransactionTrackerError { TxSenderError::ReplacementUnderpriced => { TransactionTrackerError::ReplacementUnderpriced } + TxSenderError::SoftCancelFailed => { + TransactionTrackerError::Other(anyhow::anyhow!("soft cancel failed")) + } TxSenderError::Other(e) => TransactionTrackerError::Other(e), } } From 3c91c62a0859d651369d2f45356e447b9856853e Mon Sep 17 00:00:00 2001 From: dancoombs Date: Tue, 11 Jun 2024 14:59:56 -0500 Subject: [PATCH 06/17] refactor(builder): large refactor of bundle sender state machine and metrics --- crates/builder/src/bundle_sender.rs | 832 +++++++++++++++------------- crates/builder/src/server/local.rs | 3 - 2 files changed, 462 insertions(+), 373 deletions(-) diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index 67375f0c5..8a7a37af2 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -57,10 +57,11 @@ pub(crate) struct BundleSenderImpl { beneficiary: Address, proposer: P, entry_point: E, - transaction_tracker: T, + transaction_tracker: Option, pool: C, settings: Settings, event_sender: broadcast::Sender>, + metrics: BuilderMetrics, _uo_type: PhantomData, } @@ -91,9 +92,6 @@ pub enum SendBundleResult { tx_hash: H256, }, NoOperationsInitially, - NoOperationsAfterFeeIncreases { - attempt_number: u64, - }, StalledAtMaxFeeIncreases, Error(anyhow::Error), } @@ -124,314 +122,23 @@ where /// next one. 
#[instrument(skip_all, fields(entry_point = self.entry_point.address().to_string(), builder_index = self.builder_index))] async fn send_bundles_in_loop(mut self) -> anyhow::Result<()> { - // State of the sender loop - enum State { - // Building a bundle, optionally waiting for a trigger to send it - // (wait_for_trigger, fee_increase_count) - Building(bool, u64), - // Waiting for a bundle to be mined - // (wait_until_block, fee_increase_count) - Pending(u64, u64), - // Cancelling the last transaction - // (fee_increase_count) - Cancelling(u64), - // Waiting for a cancellation transaction to be mined - // (wait_until_block, fee_increase_count) - CancelPending(u64, u64), - } - - // initial state - let mut state = State::Building(true, 0); - - // response to manual caller - let mut send_bundle_response = None; - let mut send_bundle_result = None; - // trigger for sending bundles - let mut sender_trigger = BundleSenderTrigger::new( + let sender_trigger = BundleSenderTrigger::new( &self.pool, self.bundle_action_receiver.take().unwrap(), Duration::from_millis(self.chain_spec.bundle_max_send_interval_millis), ) .await?; - loop { - match state { - State::Building(wait_for_trigger, fee_increase_count) => { - if wait_for_trigger { - send_bundle_response = sender_trigger.wait_for_trigger().await?; - - // process any nonce updates, ignore result - self.check_for_transaction_update().await; - } - - // send bundle - debug!( - "Building bundle on block {}", - sender_trigger.last_block.block_number - ); - let result = self.send_bundle(fee_increase_count).await; - - // handle result - match result { - Ok(SendBundleAttemptResult::Success) => { - // sent the bundle - info!("Bundle sent successfully"); - state = State::Pending( - sender_trigger.last_block.block_number - + self.settings.max_blocks_to_wait_for_mine, - 0, - ); - } - Ok(SendBundleAttemptResult::NoOperations) => { - debug!("No operations in bundle"); - - if fee_increase_count > 0 { - warn!("Abandoning bundle after fee increases {fee_increase_count}, no operations available"); - BuilderMetrics::increment_bundle_txns_abandoned( - self.builder_index, - self.entry_point.address(), - ); - send_bundle_result = - Some(SendBundleResult::NoOperationsAfterFeeIncreases { - attempt_number: fee_increase_count, - }); - - // abandon the bundle by resetting the tracker and starting a new bundle process - // If the node we are using still has the transaction in the mempool, its - // possible we will get a `ReplacementUnderpriced` on the next iteration - // and will start a cancellation. 
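// An illustrative sketch, not part of the diff: the replacement-fee rule used
// when resubmitting or cancelling. It mirrors the cancellation path in
// transaction_tracker.rs: bump the previous fees by the configured percentage
// and never price below a fresh estimate. `replacement_fees` is a hypothetical
// helper name.
fn replacement_fees(last: GasFees, estimate: GasFees, percent_increase: u64) -> GasFees {
    let bumped = last.increase_by_percent(percent_increase);
    GasFees {
        max_fee_per_gas: bumped.max_fee_per_gas.max(estimate.max_fee_per_gas),
        max_priority_fee_per_gas: bumped
            .max_priority_fee_per_gas
            .max(estimate.max_priority_fee_per_gas),
    }
}
// For example, with a 10% increase a bundle previously sent at a
// max_fee_per_gas of 100 gwei must be replaced at 110 gwei or the current
// estimate, whichever is higher.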
- self.transaction_tracker.reset().await; - state = State::Building(true, 0); - } else { - debug!("No operations available, waiting for next trigger"); - send_bundle_result = Some(SendBundleResult::NoOperationsInitially); - state = State::Building(true, 0); - } - } - Ok(SendBundleAttemptResult::NonceTooLow) => { - // reset the transaction tracker and try again - info!("Nonce too low, starting new bundle attempt"); - self.transaction_tracker.reset().await; - state = State::Building(true, 0); - } - Ok(SendBundleAttemptResult::ReplacementUnderpriced) => { - info!( - "Replacement transaction underpriced, entering cancellation loop" - ); - self.transaction_tracker.reset().await; - state = State::Cancelling(0); - } - Err(error) => { - error!("Bundle send error {error:?}"); - BuilderMetrics::increment_bundle_txns_failed( - self.builder_index, - self.entry_point.address(), - ); - self.transaction_tracker.reset().await; - send_bundle_result = Some(SendBundleResult::Error(error)); - state = State::Building(true, 0); - } - } - } - State::Pending(until, fee_increase_count) => { - sender_trigger.wait_for_block().await?; - - // check for transaction update - if let Some(update) = self.check_for_transaction_update().await { - match update { - TrackerUpdate::Mined { - block_number, - attempt_number, - gas_limit, - gas_used, - tx_hash, - nonce, - .. - } => { - // mined! - info!("Bundle transaction mined"); - BuilderMetrics::process_bundle_txn_success( - self.builder_index, - self.entry_point.address(), - gas_limit, - gas_used, - ); - self.emit(BuilderEvent::transaction_mined( - self.builder_index, - tx_hash, - nonce.low_u64(), - block_number, - )); - send_bundle_result = Some(SendBundleResult::Success { - block_number, - attempt_number, - tx_hash, - }); - state = State::Building(true, 0); - } - TrackerUpdate::LatestTxDropped { nonce } => { - // try again, don't wait for trigger, re-estimate fees - info!("Latest transaction dropped, starting new bundle attempt"); - self.emit(BuilderEvent::latest_transaction_dropped( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_dropped( - self.builder_index, - self.entry_point.address(), - ); - - // force reset the transaction tracker - self.transaction_tracker.reset().await; - state = State::Building(true, 0); - } - TrackerUpdate::NonceUsedForOtherTx { nonce } => { - // try again, don't wait for trigger, re-estimate fees - info!("Nonce used externally, starting new bundle attempt"); - self.emit(BuilderEvent::nonce_used_for_other_transaction( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_nonce_used( - self.builder_index, - self.entry_point.address(), - ); - state = State::Building(true, 0); - } - } - } else if sender_trigger.last_block().block_number >= until { - // start replacement, don't wait for trigger. Continue - // to attempt until there are no longer any UOs priced high enough - // to bundle. 
- info!( - "Not mined after {} blocks, increasing fees, attempt: {}", - self.settings.max_blocks_to_wait_for_mine, - fee_increase_count + 1 - ); - BuilderMetrics::increment_bundle_txn_fee_increases( - self.builder_index, - self.entry_point.address(), - ); - state = State::Building(false, fee_increase_count + 1); - } - } - State::Cancelling(fee_increase_count) => { - // cancel the transaction - info!("Cancelling last transaction"); - - let (estimated_fees, _) = self - .proposer - .estimate_gas_fees(None) - .await - .unwrap_or_default(); - - let cancel_res = self - .transaction_tracker - .cancel_transaction(self.entry_point.address(), estimated_fees) - .await; - - match cancel_res { - Ok(Some(_)) => { - info!("Cancellation transaction sent, waiting for confirmation"); - BuilderMetrics::increment_cancellation_txns_sent( - self.builder_index, - self.entry_point.address(), - ); - - state = State::CancelPending( - sender_trigger.last_block.block_number - + self.settings.max_blocks_to_wait_for_mine, - fee_increase_count, - ); - } - Ok(None) => { - info!("Soft cancellation or no transaction to cancel, starting new bundle attempt"); - BuilderMetrics::increment_soft_cancellations( - self.builder_index, - self.entry_point.address(), - ); - - state = State::Building(true, 0); - } - Err(TransactionTrackerError::ReplacementUnderpriced) => { - info!("Replacement transaction underpriced during cancellation, trying again"); - state = State::Cancelling(fee_increase_count + 1); - } - Err(TransactionTrackerError::NonceTooLow) => { - // reset the transaction tracker and try again - info!("Nonce too low during cancellation, starting new bundle attempt"); - self.transaction_tracker.reset().await; - state = State::Building(true, 0); - } - Err(e) => { - error!("Failed to cancel transaction, moving back to building state: {e:#?}"); - BuilderMetrics::increment_cancellation_txns_failed( - self.builder_index, - self.entry_point.address(), - ); - state = State::Building(true, 0); - } - } - } - State::CancelPending(until, fee_increase_count) => { - sender_trigger.wait_for_block().await?; - - // check for transaction update - if let Some(update) = self.check_for_transaction_update().await { - match update { - TrackerUpdate::Mined { .. } => { - // mined - info!("Cancellation transaction mined"); - BuilderMetrics::increment_cancellation_txns_mined( - self.builder_index, - self.entry_point.address(), - ); - } - TrackerUpdate::LatestTxDropped { .. } => { - // If a cancellation gets dropped, move to bundling state as there is no - // longer a pending transaction - info!( - "Cancellation transaction dropped, starting new bundle attempt" - ); - // force reset the transaction tracker - self.transaction_tracker.reset().await; - } - TrackerUpdate::NonceUsedForOtherTx { .. 
} => { - // If a nonce is used externally, move to bundling state as there is no longer - // a pending transaction - info!("Nonce used externally while cancelling, starting new bundle attempt"); - } - } - - state = State::Building(true, 0); - } else if sender_trigger.last_block().block_number >= until { - if fee_increase_count >= self.settings.max_fee_increases { - // abandon the cancellation - warn!("Abandoning cancellation after max fee increases {fee_increase_count}, starting new bundle attempt"); - // force reset the transaction tracker - self.transaction_tracker.reset().await; - state = State::Building(true, 0); - } else { - // start replacement, don't wait for trigger - info!( - "Cancellation not mined after {} blocks, increasing fees, attempt: {}", - self.settings.max_blocks_to_wait_for_mine, - fee_increase_count + 1 - ); - state = State::Cancelling(fee_increase_count + 1); - } - } - } - } + // initial state + let mut state = + SenderMachineState::new(sender_trigger, self.transaction_tracker.take().unwrap()); - // send result to manual caller - if let Some(res) = send_bundle_result.take() { - if let Some(r) = send_bundle_response.take() { - if r.send(res).is_err() { - error!("Failed to send bundle result to manual caller"); - } - } + loop { + if let Err(e) = self.step_state(&mut state).await { + error!("Error in bundle sender loop: {e:#?}"); + self.metrics.increment_state_machine_errors(); + state.reset(); } } } @@ -464,48 +171,264 @@ where chain_spec, beneficiary, proposer, - entry_point, - transaction_tracker, + transaction_tracker: Some(transaction_tracker), pool, settings, event_sender, + metrics: BuilderMetrics { + builder_index, + entry_point: entry_point.address(), + }, + entry_point, _uo_type: PhantomData, } } - async fn check_for_transaction_update(&mut self) -> Option { - let update = self.transaction_tracker.check_for_update().await; - let update = match update { - Ok(update) => update?, - Err(error) => { - error!("Failed to check for transaction updates: {error:#?}"); - return None; + async fn step_state(&mut self, state: &mut SenderMachineState) -> anyhow::Result<()> { + let tracker_update = state.wait_for_trigger().await?; + + match state.inner { + InnerState::Building(building_state) => { + self.handle_building_state(state, building_state).await?; } - }; + InnerState::Pending(pending_state) => { + self.handle_pending_state(state, pending_state, tracker_update) + .await?; + } + InnerState::Cancelling(cancelling_state) => { + self.handle_cancelling_state(state, cancelling_state) + .await?; + } + InnerState::CancelPending(cancel_pending_state) => { + self.handle_cancel_pending_state(state, cancel_pending_state, tracker_update) + .await?; + } + } - match update { - TrackerUpdate::Mined { - tx_hash, - block_number, - attempt_number, - nonce, - .. 
- } => { - if attempt_number == 0 { - info!("Transaction with hash {tx_hash:?}, nonce {nonce:?}, landed in block {block_number}"); + Ok(()) + } + + async fn handle_building_state( + &mut self, + state: &mut SenderMachineState, + inner: BuildingState, + ) -> anyhow::Result<()> { + // send bundle + let block_number = state.block_number(); + debug!("Building bundle on block {}", block_number); + let result = self.send_bundle(state, inner.fee_increase_count).await; + + // handle result + match result { + Ok(SendBundleAttemptResult::Success) => { + // sent the bundle + info!("Bundle sent successfully"); + state.update(InnerState::Pending(inner.to_pending( + block_number + self.settings.max_blocks_to_wait_for_mine, + ))); + } + Ok(SendBundleAttemptResult::NoOperations) => { + debug!("No operations to bundle"); + if inner.fee_increase_count > 0 { + warn!( + "Abandoning bundle after fee increases {}, no operations available", + inner.fee_increase_count + ); + self.metrics.increment_bundle_txns_abandoned(); + + // abandon the bundle by starting a new bundle process + // If the node we are using still has the transaction in the mempool, its + // possible we will get a `ReplacementUnderpriced` on the next iteration + // and will start a cancellation. + state.reset(); } else { - info!("Transaction with hash {tx_hash:?}, nonce {nonce:?}, landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); + debug!("No operations available, waiting for next trigger"); + state.complete(Some(SendBundleResult::NoOperationsInitially)); } } - TrackerUpdate::LatestTxDropped { nonce } => { - info!("Previous transaction dropped by sender. Nonce: {nonce:?}"); + Ok(SendBundleAttemptResult::NonceTooLow) => { + // reset the transaction tracker and try again + info!("Nonce too low, starting new bundle attempt"); + state.reset(); } - TrackerUpdate::NonceUsedForOtherTx { nonce } => { - info!("Nonce used by external transaction. Nonce: {nonce:?}"); + Ok(SendBundleAttemptResult::ReplacementUnderpriced) => { + info!("Replacement transaction underpriced, entering cancellation loop"); + state.update(InnerState::Cancelling(inner.to_cancelling())); } - }; + Err(error) => { + error!("Bundle send error {error:?}"); + self.metrics.increment_bundle_txns_failed(); + let send_bundle_result = Some(SendBundleResult::Error(error)); + state.complete(send_bundle_result); + } + } + + Ok(()) + } - Some(update) + async fn handle_pending_state( + &mut self, + state: &mut SenderMachineState, + inner: PendingState, + tracker_update: Option, + ) -> anyhow::Result<()> { + if let Some(update) = tracker_update { + match update { + TrackerUpdate::Mined { + block_number, + attempt_number, + gas_limit, + gas_used, + tx_hash, + nonce, + .. 
+ } => { + info!("Bundle transaction mined"); + self.metrics.process_bundle_txn_success(gas_limit, gas_used); + self.emit(BuilderEvent::transaction_mined( + self.builder_index, + tx_hash, + nonce.low_u64(), + block_number, + )); + let send_bundle_result = Some(SendBundleResult::Success { + block_number, + attempt_number, + tx_hash, + }); + state.complete(send_bundle_result); + } + TrackerUpdate::LatestTxDropped { nonce } => { + // try again, don't wait for trigger, re-estimate fees + info!("Latest transaction dropped, starting new bundle attempt"); + self.emit(BuilderEvent::latest_transaction_dropped( + self.builder_index, + nonce.low_u64(), + )); + self.metrics.increment_bundle_txns_dropped(); + state.reset(); + } + TrackerUpdate::NonceUsedForOtherTx { nonce } => { + // try again, don't wait for trigger, re-estimate fees + info!("Nonce used externally, starting new bundle attempt"); + self.emit(BuilderEvent::nonce_used_for_other_transaction( + self.builder_index, + nonce.low_u64(), + )); + self.metrics.increment_bundle_txns_nonce_used(); + state.reset(); + } + } + } else if state.block_number() >= inner.until { + // start replacement, don't wait for trigger. Continue + // to attempt until there are no longer any UOs priced high enough + // to bundle. + info!( + "Not mined after {} blocks, increasing fees, attempt: {}", + self.settings.max_blocks_to_wait_for_mine, + inner.fee_increase_count + 1 + ); + self.metrics.increment_bundle_txn_fee_increases(); + state.update(InnerState::Building(inner.to_building())) + } + + Ok(()) + } + + async fn handle_cancelling_state( + &mut self, + state: &mut SenderMachineState, + inner: CancellingState, + ) -> anyhow::Result<()> { + info!("Cancelling last transaction"); + + let (estimated_fees, _) = self + .proposer + .estimate_gas_fees(None) + .await + .unwrap_or_default(); + + let cancel_res = state + .transaction_tracker + .cancel_transaction(self.entry_point.address(), estimated_fees) + .await; + + match cancel_res { + Ok(Some(_)) => { + info!("Cancellation transaction sent, waiting for confirmation"); + self.metrics.increment_cancellation_txns_sent(); + + state.update(InnerState::CancelPending(inner.to_cancel_pending( + state.block_number() + self.settings.max_blocks_to_wait_for_mine, + ))); + } + Ok(None) => { + info!("Soft cancellation or no transaction to cancel, starting new bundle attempt"); + self.metrics.increment_soft_cancellations(); + state.reset(); + } + Err(TransactionTrackerError::ReplacementUnderpriced) => { + info!("Replacement transaction underpriced during cancellation, trying again"); + state.update(InnerState::Cancelling(inner.to_self())); + } + Err(TransactionTrackerError::NonceTooLow) => { + // reset the transaction tracker and try again + info!("Nonce too low during cancellation, starting new bundle attempt"); + state.reset(); + } + Err(e) => { + error!("Failed to cancel transaction, moving back to building state: {e:#?}"); + self.metrics.increment_cancellation_txns_failed(); + state.reset(); + } + } + + Ok(()) + } + + async fn handle_cancel_pending_state( + &mut self, + state: &mut SenderMachineState, + inner: CancelPendingState, + tracker_update: Option, + ) -> anyhow::Result<()> { + // check for transaction update + if let Some(update) = tracker_update { + match update { + TrackerUpdate::Mined { .. } => { + // mined + info!("Cancellation transaction mined"); + self.metrics.increment_cancellation_txns_mined(); + } + TrackerUpdate::LatestTxDropped { .. 
} => { + // If a cancellation gets dropped, move to bundling state as there is no + // longer a pending transaction + info!("Cancellation transaction dropped, starting new bundle attempt"); + } + TrackerUpdate::NonceUsedForOtherTx { .. } => { + // If a nonce is used externally, move to bundling state as there is no longer + // a pending transaction + info!("Nonce used externally while cancelling, starting new bundle attempt"); + } + } + state.reset(); + } else if state.block_number() >= inner.until { + if inner.fee_increase_count >= self.settings.max_fee_increases { + // abandon the cancellation + warn!("Abandoning cancellation after max fee increases {}, starting new bundle attempt", inner.fee_increase_count); + state.reset(); + } else { + // start replacement, don't wait for trigger + info!( + "Cancellation not mined after {} blocks, increasing fees, attempt: {}", + self.settings.max_blocks_to_wait_for_mine, + inner.fee_increase_count + 1 + ); + state.update(InnerState::Cancelling(inner.to_cancelling())); + } + } + + Ok(()) } /// Constructs a bundle and sends it to the entry point as a transaction. @@ -516,9 +439,10 @@ where /// are no ops that meet the fee requirements. async fn send_bundle( &mut self, + state: &mut SenderMachineState, fee_increase_count: u64, ) -> anyhow::Result { - let (nonce, required_fees) = self.transaction_tracker.get_nonce_and_required_fees()?; + let (nonce, required_fees) = state.transaction_tracker.get_nonce_and_required_fees()?; let Some(bundle_tx) = self .get_bundle_tx(nonce, required_fees, fee_increase_count > 0) @@ -539,9 +463,9 @@ where op_hashes, } = bundle_tx; - BuilderMetrics::increment_bundle_txns_sent(self.builder_index, self.entry_point.address()); + self.metrics.increment_bundle_txns_sent(); - let send_result = self + let send_result = state .transaction_tracker .send_transaction(tx.clone(), &expected_storage) .await; @@ -567,10 +491,7 @@ where Ok(SendBundleAttemptResult::NonceTooLow) } Err(TransactionTrackerError::ReplacementUnderpriced) => { - BuilderMetrics::increment_bundle_txn_replacement_underpriced( - self.builder_index, - self.entry_point.address(), - ); + self.metrics.increment_bundle_txn_replacement_underpriced(); warn!("Replacement transaction underpriced"); Ok(SendBundleAttemptResult::ReplacementUnderpriced) } @@ -669,6 +590,174 @@ where } } +struct SenderMachineState { + trigger: BundleSenderTrigger, + transaction_tracker: T, + send_bundle_response: Option>, + inner: InnerState, + requires_reset: bool, +} + +impl SenderMachineState { + fn new(trigger: BundleSenderTrigger, transaction_tracker: T) -> Self { + Self { + trigger, + transaction_tracker, + send_bundle_response: None, + inner: InnerState::new(), + requires_reset: false, + } + } + + fn update(&mut self, inner: InnerState) { + self.inner = inner; + } + + // resets the state machine to the initial state, doesn't wait for next trigger + fn reset(&mut self) { + self.requires_reset = true; + let building_state = BuildingState { + wait_for_trigger: false, + fee_increase_count: 0, + }; + self.inner = InnerState::Building(building_state); + } + + fn complete(&mut self, result: Option) { + if let Some(result) = result { + if let Some(r) = self.send_bundle_response.take() { + if r.send(result).is_err() { + error!("Failed to send bundle result to manual caller"); + } + } + } + self.inner = InnerState::new(); + } + + async fn wait_for_trigger(&mut self) -> anyhow::Result> { + if self.requires_reset { + self.transaction_tracker.reset().await; + self.requires_reset = false; + } + + match 
&self.inner { + InnerState::Building(s) => { + if !s.wait_for_trigger { + return Ok(None); + } + + self.send_bundle_response = self.trigger.wait_for_trigger().await?; + self.transaction_tracker + .check_for_update() + .await + .map_err(|e| anyhow::anyhow!("transaction tracker update error {e:?}")) + } + InnerState::Pending(..) | InnerState::CancelPending(..) => { + self.trigger.wait_for_block().await?; + self.transaction_tracker + .check_for_update() + .await + .map_err(|e| anyhow::anyhow!("transaction tracker update error {e:?}")) + } + InnerState::Cancelling(..) => Ok(None), + } + } + + fn block_number(&self) -> u64 { + self.trigger.last_block().block_number + } +} + +// State of the sender loop +enum InnerState { + // Building a bundle, optionally waiting for a trigger to send it + Building(BuildingState), + // Waiting for a bundle to be mined + Pending(PendingState), + // Cancelling the last transaction + Cancelling(CancellingState), + // Waiting for a cancellation transaction to be mined + CancelPending(CancelPendingState), +} + +impl InnerState { + fn new() -> Self { + InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + }) + } +} + +#[derive(Debug, Clone, Copy)] +struct BuildingState { + wait_for_trigger: bool, + fee_increase_count: u64, +} + +impl BuildingState { + fn to_pending(self, until: u64) -> PendingState { + PendingState { + until, + fee_increase_count: self.fee_increase_count, + } + } + + fn to_cancelling(self) -> CancellingState { + CancellingState { + fee_increase_count: 0, + } + } +} + +#[derive(Debug, Clone, Copy)] +struct PendingState { + until: u64, + fee_increase_count: u64, +} + +impl PendingState { + fn to_building(self) -> BuildingState { + BuildingState { + wait_for_trigger: true, + fee_increase_count: self.fee_increase_count + 1, + } + } +} + +#[derive(Debug, Clone, Copy)] +struct CancellingState { + fee_increase_count: u64, +} + +impl CancellingState { + fn to_self(mut self) -> Self { + self.fee_increase_count += 1; + self + } + + fn to_cancel_pending(self, until: u64) -> CancelPendingState { + CancelPendingState { + until, + fee_increase_count: self.fee_increase_count, + } + } +} + +#[derive(Debug, Clone, Copy)] +struct CancelPendingState { + until: u64, + fee_increase_count: u64, +} + +impl CancelPendingState { + fn to_cancelling(self) -> CancellingState { + CancellingState { + fee_increase_count: self.fee_increase_count + 1, + } + } +} + struct BundleSenderTrigger { bundling_mode: BundlingMode, block_rx: UnboundedReceiver, @@ -826,69 +915,72 @@ impl BundleSenderTrigger { } } -struct BuilderMetrics {} +#[derive(Debug, Clone)] +struct BuilderMetrics { + builder_index: u64, + entry_point: Address, +} impl BuilderMetrics { - fn increment_bundle_txns_sent(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_sent", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()) + fn increment_bundle_txns_sent(&self) { + metrics::counter!("builder_bundle_txns_sent", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()) .increment(1); } - fn process_bundle_txn_success( - builder_index: u64, - entry_point: Address, - gas_limit: Option, - gas_used: Option, - ) { - metrics::counter!("builder_bundle_txns_success", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn process_bundle_txn_success(&self, gas_limit: Option, gas_used: Option) { + 
metrics::counter!("builder_bundle_txns_success", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); if let Some(limit) = gas_limit { - metrics::counter!("builder_bundle_gas_limit", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(limit.as_u64()); + metrics::counter!("builder_bundle_gas_limit", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(limit.as_u64()); } if let Some(used) = gas_used { - metrics::counter!("builder_bundle_gas_used", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(used.as_u64()); + metrics::counter!("builder_bundle_gas_used", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(used.as_u64()); } } - fn increment_bundle_txns_dropped(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_dropped", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_bundle_txns_dropped(&self) { + metrics::counter!("builder_bundle_txns_dropped", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } // used when we decide to stop trying a transaction - fn increment_bundle_txns_abandoned(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_abandoned", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_bundle_txns_abandoned(&self) { + metrics::counter!("builder_bundle_txns_abandoned", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } // used when sending a transaction fails - fn increment_bundle_txns_failed(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_failed", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_bundle_txns_failed(&self) { + metrics::counter!("builder_bundle_txns_failed", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txns_nonce_used(&self) { + metrics::counter!("builder_bundle_txns_nonce_used", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } - fn increment_bundle_txns_nonce_used(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_txns_nonce_used", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_bundle_txn_fee_increases(&self) { + metrics::counter!("builder_bundle_fee_increases", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } - fn increment_bundle_txn_fee_increases(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_bundle_fee_increases", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_bundle_txn_replacement_underpriced(&self) { + metrics::counter!("builder_bundle_replacement_underpriced", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } - fn increment_bundle_txn_replacement_underpriced(builder_index: u64, entry_point: 
Address) { - metrics::counter!("builder_bundle_replacement_underpriced", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_cancellation_txns_sent(&self) { + metrics::counter!("builder_cancellation_txns_sent", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } - fn increment_cancellation_txns_sent(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_cancellation_txns_sent", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_cancellation_txns_mined(&self) { + metrics::counter!("builder_cancellation_txns_mined", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } - fn increment_cancellation_txns_mined(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_cancellation_txns_mined", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_soft_cancellations(&self) { + metrics::counter!("builder_soft_cancellations", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } - fn increment_soft_cancellations(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_soft_cancellations", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_cancellation_txns_failed(&self) { + metrics::counter!("builder_cancellation_txns_failed", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } - fn increment_cancellation_txns_failed(builder_index: u64, entry_point: Address) { - metrics::counter!("builder_cancellation_txns_failed", "entry_point" => entry_point.to_string(), "builder_index" => builder_index.to_string()).increment(1); + fn increment_state_machine_errors(&self) { + metrics::counter!("builder_state_machine_errors", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } } diff --git a/crates/builder/src/server/local.rs b/crates/builder/src/server/local.rs index 76bc0cf87..fa6758ab3 100644 --- a/crates/builder/src/server/local.rs +++ b/crates/builder/src/server/local.rs @@ -186,9 +186,6 @@ impl LocalBuilderServerRunner { SendBundleResult::NoOperationsInitially => { Err(anyhow::anyhow!("no ops to send").into()) }, - SendBundleResult::NoOperationsAfterFeeIncreases { .. 
} => { - Err(anyhow::anyhow!("bundle initially had operations, but after increasing gas fees it was empty").into()) - }, SendBundleResult::StalledAtMaxFeeIncreases => Err(anyhow::anyhow!("stalled at max fee increases").into()), SendBundleResult::Error(e) => Err(anyhow::anyhow!("send bundle error: {e:?}").into()), } From 94fc3807594fea9b9bd71e7b8a8ab86f5f00a461 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Wed, 12 Jun 2024 16:55:55 -0500 Subject: [PATCH 07/17] fix(builder): rework raw sender to support dropped/conditional/split-rpcs correctly --- bin/rundler/src/cli/builder.rs | 58 ++++++++-- crates/builder/src/lib.rs | 3 +- crates/builder/src/sender/conditional.rs | 130 ---------------------- crates/builder/src/sender/mod.rs | 62 ++++++++--- crates/builder/src/sender/raw.rs | 59 +++++++--- crates/builder/src/task.rs | 34 ++++-- crates/builder/src/transaction_tracker.rs | 78 ++++++------- docs/cli.md | 28 +++-- 8 files changed, 220 insertions(+), 232 deletions(-) delete mode 100644 crates/builder/src/sender/conditional.rs diff --git a/bin/rundler/src/cli/builder.rs b/bin/rundler/src/cli/builder.rs index 0cd5bed81..94f9b83c4 100644 --- a/bin/rundler/src/cli/builder.rs +++ b/bin/rundler/src/cli/builder.rs @@ -17,8 +17,8 @@ use anyhow::Context; use clap::Args; use rundler_builder::{ self, BloxrouteSenderArgs, BuilderEvent, BuilderEventKind, BuilderTask, BuilderTaskArgs, - EntryPointBuilderSettings, FlashbotsSenderArgs, LocalBuilderBuilder, TransactionSenderArgs, - TransactionSenderKind, + EntryPointBuilderSettings, FlashbotsSenderArgs, LocalBuilderBuilder, RawSenderArgs, + TransactionSenderArgs, TransactionSenderKind, }; use rundler_pool::RemotePoolClient; use rundler_sim::{MempoolConfigs, PriorityFeeMode}; @@ -115,7 +115,7 @@ pub struct BuilderArgs { /// If present, the url of the ETH provider that will be used to send /// transactions. Defaults to the value of `node_http`. /// - /// Only used when BUILDER_SENDER is "raw" or "conditional" + /// Only used when BUILDER_SENDER is "raw" #[arg( long = "builder.submit_url", name = "builder.submit_url", @@ -123,6 +123,40 @@ pub struct BuilderArgs { )] pub submit_url: Option, + /// If present, the url of the ETH provider that will be used to check + /// transaction status. Else will use the node http for status. + /// + /// Only used when BUILDER_SENDER is "raw" + #[arg( + long = "builder.use_submit_for_status", + name = "builder.use_submit_for_status", + env = "BUILDER_USE_SUBMIT_FOR_STATUS", + default_value = "false" + )] + pub use_submit_for_status: bool, + + /// Use the conditional RPC endpoint for transaction submission. + /// + /// Only used when BUILDER_SENDER is "raw" + #[arg( + long = "builder.use_conditional_rpc", + name = "builder.use_conditional_rpc", + env = "BUILDER_USE_CONDITIONAL_RPC", + default_value = "false" + )] + pub use_conditional_rpc: bool, + + /// If the "dropped" status is unsupported by the status provider. + /// + /// Only used when BUILDER_SENDER is "raw" + #[arg( + long = "builder.dropped_status_unsupported", + name = "builder.dropped_status_unsupported", + env = "BUILDER_DROPPED_STATUS_UNSUPPORTED", + default_value = "false" + )] + pub dropped_status_unsupported: bool, + /// A list of builders to pass into the Flashbots Relay RPC. 
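For orientation, the flags above feed the raw sender's two-provider wiring shown later in `sender/mod.rs` and `task.rs`: one endpoint for submission and one for status polling. A rough sketch of the selection rule, using placeholder types rather than the real `Arc<Provider<..>>` handles:

```rust
// Sketch of the endpoint-selection rule implied by the new flags; `RpcUrl` is a
// stand-in for the provider handles the real code passes around.
struct RpcUrl(String);

struct RawSenderConfig {
    submit_url: Option<String>,
    use_submit_for_status: bool,
}

/// Returns (submit, status) endpoints for the raw sender.
fn pick_endpoints(node_http: &str, cfg: &RawSenderConfig) -> (RpcUrl, RpcUrl) {
    // submit_url defaults to the node HTTP URL when not provided
    let submit = RpcUrl(cfg.submit_url.clone().unwrap_or_else(|| node_http.to_string()));
    // status checks use the submit endpoint only when explicitly requested
    let status = if cfg.use_submit_for_status {
        RpcUrl(submit.0.clone())
    } else {
        RpcUrl(node_http.to_string())
    };
    (submit, status)
}

fn main() {
    let cfg = RawSenderConfig {
        submit_url: Some("https://submit.example".into()),
        use_submit_for_status: false,
    };
    let (submit, status) = pick_endpoints("http://localhost:8545", &cfg);
    println!("submit via {}, poll status via {}", submit.0, status.0);
}
```

Keeping submission and status separable is useful when, for example, the submit endpoint is a private relay that cannot serve transaction-status queries.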
/// /// Only used when BUILDER_SENDER is "flashbots" @@ -216,7 +250,6 @@ impl BuilderArgs { .node_http .clone() .context("should have a node HTTP URL")?; - let submit_url = self.submit_url.clone().unwrap_or_else(|| rpc_url.clone()); let mempool_configs = match &common.mempool_config_path { Some(path) => get_json_config::(path, &common.aws_region) @@ -264,7 +297,7 @@ impl BuilderArgs { )); } - let sender_args = self.sender_args(&chain_spec)?; + let sender_args = self.sender_args(&chain_spec, &rpc_url)?; Ok(BuilderTaskArgs { entry_points, @@ -281,7 +314,6 @@ impl BuilderArgs { redis_lock_ttl_millis: self.redis_lock_ttl_millis, max_bundle_size: self.max_bundle_size, max_bundle_gas: common.max_bundle_gas, - submit_url, bundle_priority_fee_overhead_percent: common.bundle_priority_fee_overhead_percent, priority_fee_mode, sender_args, @@ -294,10 +326,18 @@ impl BuilderArgs { }) } - fn sender_args(&self, chain_spec: &ChainSpec) -> anyhow::Result { + fn sender_args( + &self, + chain_spec: &ChainSpec, + rpc_url: &str, + ) -> anyhow::Result { match self.sender_type { - TransactionSenderKind::Raw => Ok(TransactionSenderArgs::Raw), - TransactionSenderKind::Conditional => Ok(TransactionSenderArgs::Conditional), + TransactionSenderKind::Raw => Ok(TransactionSenderArgs::Raw(RawSenderArgs { + submit_url: self.submit_url.clone().unwrap_or_else(|| rpc_url.into()), + use_submit_for_status: self.use_submit_for_status, + dropped_status_supported: !self.dropped_status_unsupported, + use_conditional_rpc: self.use_conditional_rpc, + })), TransactionSenderKind::Flashbots => { if !chain_spec.flashbots_enabled { return Err(anyhow::anyhow!("Flashbots sender is not enabled for chain")); diff --git a/crates/builder/src/lib.rs b/crates/builder/src/lib.rs index 3363679ff..ceee246fc 100644 --- a/crates/builder/src/lib.rs +++ b/crates/builder/src/lib.rs @@ -27,7 +27,8 @@ pub use emit::{BuilderEvent, BuilderEventKind}; mod sender; pub use sender::{ - BloxrouteSenderArgs, FlashbotsSenderArgs, TransactionSenderArgs, TransactionSenderKind, + BloxrouteSenderArgs, FlashbotsSenderArgs, RawSenderArgs, TransactionSenderArgs, + TransactionSenderKind, }; mod server; diff --git a/crates/builder/src/sender/conditional.rs b/crates/builder/src/sender/conditional.rs deleted file mode 100644 index 5e1ca3e35..000000000 --- a/crates/builder/src/sender/conditional.rs +++ /dev/null @@ -1,130 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. 
- -use std::sync::Arc; - -use anyhow::Context; -use ethers::{ - middleware::SignerMiddleware, - providers::{JsonRpcClient, Middleware, PendingTransaction, Provider}, - types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256, U256}, -}; -use ethers_signers::Signer; -use rundler_sim::ExpectedStorage; -use rundler_types::GasFees; -use serde_json::json; -use tonic::async_trait; - -use super::{ - create_hard_cancel_tx, fill_and_sign, CancelTxInfo, Result, SentTxInfo, TransactionSender, - TxStatus, -}; - -pub(crate) struct ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - // The `SignerMiddleware` specifically needs to wrap a `Provider`, and not - // just any `Middleware`, because `.request()` is only on `Provider` and not - // on `Middleware`. - provider: SignerMiddleware>, S>, -} - -#[async_trait] -impl TransactionSender for ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - async fn send_transaction( - &self, - tx: TypedTransaction, - expected_storage: &ExpectedStorage, - ) -> Result { - let (raw_tx, nonce) = fill_and_sign(&self.provider, tx).await?; - - let tx_hash = self - .provider - .provider() - .request( - "eth_sendRawTransactionConditional", - (raw_tx, json!({ "knownAccounts": expected_storage })), - ) - .await?; - - Ok(SentTxInfo { nonce, tx_hash }) - } - - async fn cancel_transaction( - &self, - _tx_hash: H256, - nonce: U256, - to: Address, - gas_fees: GasFees, - ) -> Result { - let tx = create_hard_cancel_tx(self.provider.address(), to, nonce, gas_fees); - - let (raw_tx, _) = fill_and_sign(&self.provider, tx).await?; - - let tx_hash = self - .provider - .provider() - .request("eth_sendRawTransaction", (raw_tx,)) - .await?; - - Ok(CancelTxInfo { - tx_hash, - soft_cancelled: false, - }) - } - - async fn get_transaction_status(&self, tx_hash: H256) -> Result { - let tx = self - .provider - .get_transaction(tx_hash) - .await - .context("provider should return transaction status")?; - Ok(match tx { - None => TxStatus::Dropped, - Some(tx) => match tx.block_number { - None => TxStatus::Pending, - Some(block_number) => TxStatus::Mined { - block_number: block_number.as_u64(), - }, - }, - }) - } - - async fn wait_until_mined(&self, tx_hash: H256) -> Result> { - Ok(PendingTransaction::new(tx_hash, self.provider.inner()) - .await - .context("should wait for transaction to be mined or dropped")?) - } - - fn address(&self) -> Address { - self.provider.address() - } -} - -impl ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - pub(crate) fn new(provider: Arc>, signer: S) -> Self { - Self { - provider: SignerMiddleware::new(provider, signer), - } - } -} diff --git a/crates/builder/src/sender/mod.rs b/crates/builder/src/sender/mod.rs index df4214a9a..d2bb50fed 100644 --- a/crates/builder/src/sender/mod.rs +++ b/crates/builder/src/sender/mod.rs @@ -12,7 +12,6 @@ // If not, see https://www.gnu.org/licenses/. 
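The conditional submission path deleted above does not disappear; it moves into the raw sender behind `use_conditional_rpc` (see the `raw.rs` hunk below). For reference, a hedged sketch of roughly what the `eth_sendRawTransactionConditional` params look like on the wire; the raw transaction and values here are placeholders, and option names beyond `knownAccounts` vary by node implementation.

```rust
use serde_json::json;

fn main() {
    // Placeholder signed transaction plus the expected-storage map.
    // `knownAccounts` maps address -> { storage slot -> expected 32-byte value },
    // mirroring how ExpectedStorage is serialized by the sender.
    let raw_tx = "0x02f86b...";
    let params = json!([
        raw_tx,
        {
            "knownAccounts": {
                "0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789": {
                    "0x0000000000000000000000000000000000000000000000000000000000000000":
                        "0x0000000000000000000000000000000000000000000000000000000000000001"
                }
            }
        }
    ]);
    println!("{}", serde_json::to_string_pretty(&params).unwrap());
}
```

Nodes that support this RPC typically also accept a full storage-root hash per account; the sender here passes the per-slot map built from `ExpectedStorage`.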
mod bloxroute; -mod conditional; mod flashbots; mod raw; use std::{sync::Arc, time::Duration}; @@ -20,7 +19,6 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context; use async_trait::async_trait; pub(crate) use bloxroute::PolygonBloxrouteTransactionSender; -pub(crate) use conditional::ConditionalTransactionSender; use enum_dispatch::enum_dispatch; use ethers::{ prelude::SignerMiddleware, @@ -111,7 +109,6 @@ where FS: Signer + 'static, { Raw(RawTransactionSender), - Conditional(ConditionalTransactionSender), Flashbots(FlashbotsTransactionSender), PolygonBloxroute(PolygonBloxrouteTransactionSender), } @@ -122,8 +119,6 @@ where pub enum TransactionSenderKind { /// Raw transaction sender Raw, - /// Conditional transaction sender - Conditional, /// Flashbots transaction sender Flashbots, /// Bloxroute transaction sender @@ -134,15 +129,26 @@ pub enum TransactionSenderKind { #[derive(Debug, Clone)] pub enum TransactionSenderArgs { /// Raw transaction sender - Raw, - /// Conditional transaction sender - Conditional, + Raw(RawSenderArgs), /// Flashbots transaction sender Flashbots(FlashbotsSenderArgs), /// Bloxroute transaction sender Bloxroute(BloxrouteSenderArgs), } +/// Raw sender arguments +#[derive(Debug, Clone)] +pub struct RawSenderArgs { + /// Submit URL + pub submit_url: String, + /// Use submit for status + pub use_submit_for_status: bool, + /// If the "dropped" status is supported by the status provider + pub dropped_status_supported: bool, + /// If the sender should use the conditional endpoint + pub use_conditional_rpc: bool, +} + /// Bloxroute sender arguments #[derive(Debug, Clone)] pub struct BloxrouteSenderArgs { @@ -166,21 +172,47 @@ pub struct FlashbotsSenderArgs { impl TransactionSenderArgs { pub(crate) fn into_sender( self, - client: Arc>, + rpc_provider: Arc>, + submit_provider: Option>>, signer: S, eth_poll_interval: Duration, ) -> std::result::Result, SenderConstructorErrors> { let sender = match self { - Self::Raw => TransactionSenderEnum::Raw(RawTransactionSender::new(client, signer)), - Self::Conditional => TransactionSenderEnum::Conditional( - ConditionalTransactionSender::new(client, signer), - ), + Self::Raw(args) => { + if let Some(submit_provider) = submit_provider { + if args.use_submit_for_status { + TransactionSenderEnum::Raw(RawTransactionSender::new( + Arc::clone(&submit_provider), + submit_provider, + signer, + args.dropped_status_supported, + args.use_conditional_rpc, + )) + } else { + TransactionSenderEnum::Raw(RawTransactionSender::new( + rpc_provider, + submit_provider, + signer, + args.dropped_status_supported, + args.use_conditional_rpc, + )) + } + } else { + TransactionSenderEnum::Raw(RawTransactionSender::new( + Arc::clone(&rpc_provider), + rpc_provider, + signer, + args.dropped_status_supported, + args.use_conditional_rpc, + )) + } + } Self::Flashbots(args) => { let flashbots_signer = args.auth_key.parse().context("should parse auth key")?; TransactionSenderEnum::Flashbots(FlashbotsTransactionSender::new( - client, + rpc_provider, signer, flashbots_signer, args.builders, @@ -190,7 +222,7 @@ impl TransactionSenderArgs { } Self::Bloxroute(args) => { TransactionSenderEnum::PolygonBloxroute(PolygonBloxrouteTransactionSender::new( - client, + rpc_provider, signer, eth_poll_interval, &args.header, diff --git a/crates/builder/src/sender/raw.rs b/crates/builder/src/sender/raw.rs index 4495606e4..3e23f454f 100644 --- a/crates/builder/src/sender/raw.rs +++ b/crates/builder/src/sender/raw.rs @@ -23,6 +23,7 @@ use ethers::{ use ethers_signers::Signer; use 
rundler_sim::ExpectedStorage; use rundler_types::GasFees; +use serde_json::json; use super::{CancelTxInfo, Result}; use crate::sender::{ @@ -35,10 +36,13 @@ where C: JsonRpcClient + 'static, S: Signer + 'static, { + provider: Arc>, // The `SignerMiddleware` specifically needs to wrap a `Provider`, and not // just any `Middleware`, because `.request()` is only on `Provider` and not // on `Middleware`. - provider: SignerMiddleware>, S>, + submitter: SignerMiddleware>, S>, + dropped_status_supported: bool, + use_conditional_rpc: bool, } #[async_trait] @@ -50,15 +54,25 @@ where async fn send_transaction( &self, tx: TypedTransaction, - _expected_storage: &ExpectedStorage, + expected_storage: &ExpectedStorage, ) -> Result { - let (raw_tx, nonce) = fill_and_sign(&self.provider, tx).await?; + let (raw_tx, nonce) = fill_and_sign(&self.submitter, tx).await?; + + let tx_hash = if self.use_conditional_rpc { + self.submitter + .provider() + .request( + "eth_sendRawTransactionConditional", + (raw_tx, json!({ "knownAccounts": expected_storage })), + ) + .await? + } else { + self.submitter + .provider() + .request("eth_sendRawTransaction", (raw_tx,)) + .await? + }; - let tx_hash = self - .provider - .provider() - .request("eth_sendRawTransaction", (raw_tx,)) - .await?; Ok(SentTxInfo { nonce, tx_hash }) } @@ -69,12 +83,12 @@ where to: Address, gas_fees: GasFees, ) -> Result { - let tx = create_hard_cancel_tx(self.provider.address(), to, nonce, gas_fees); + let tx = create_hard_cancel_tx(self.submitter.address(), to, nonce, gas_fees); - let (raw_tx, _) = fill_and_sign(&self.provider, tx).await?; + let (raw_tx, _) = fill_and_sign(&self.submitter, tx).await?; let tx_hash = self - .provider + .submitter .provider() .request("eth_sendRawTransaction", (raw_tx,)) .await?; @@ -92,7 +106,13 @@ where .await .context("provider should return transaction status")?; Ok(match tx { - None => TxStatus::Dropped, + None => { + if self.dropped_status_supported { + TxStatus::Dropped + } else { + TxStatus::Pending + } + } Some(tx) => match tx.block_number { None => TxStatus::Pending, Some(block_number) => TxStatus::Mined { @@ -109,7 +129,7 @@ where } fn address(&self) -> Address { - self.provider.address() + self.submitter.address() } } @@ -118,9 +138,18 @@ where C: JsonRpcClient + 'static, S: Signer + 'static, { - pub(crate) fn new(provider: Arc>, signer: S) -> Self { + pub(crate) fn new( + provider: Arc>, + submitter: Arc>, + signer: S, + dropped_status_supported: bool, + use_conditional_rpc: bool, + ) -> Self { Self { - provider: SignerMiddleware::new(provider, signer), + provider, + submitter: SignerMiddleware::new(submitter, signer), + dropped_status_supported, + use_conditional_rpc, } } } diff --git a/crates/builder/src/task.rs b/crates/builder/src/task.rs index ee8565955..36e05c011 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -77,8 +77,6 @@ pub struct Args { pub max_bundle_size: u64, /// Maximum bundle size in gas limit pub max_bundle_gas: u64, - /// URL to submit bundles too - pub submit_url: String, /// Percentage to add to the network priority fee for the bundle priority fee pub bundle_priority_fee_overhead_percent: u64, /// Priority fee mode to use for operation priority fee minimums @@ -133,6 +131,11 @@ where async fn run(mut self: Box, shutdown_token: CancellationToken) -> anyhow::Result<()> { let provider = rundler_provider::new_provider(&self.args.rpc_url, Some(self.args.eth_poll_interval))?; + let submit_provider = if let TransactionSenderArgs::Raw(args) = &self.args.sender_args { 
+ Some(rundler_provider::new_provider(&args.submit_url, None)?) + } else { + None + }; let ep_v0_6 = EthersEntryPointV0_6::new( self.args.chain_spec.entry_point_address_v0_6, @@ -154,14 +157,24 @@ where match ep.version { EntryPointVersion::V0_6 => { let (handles, actions) = self - .create_builders_v0_6(ep, Arc::clone(&provider), ep_v0_6.clone()) + .create_builders_v0_6( + ep, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_6.clone(), + ) .await?; sender_handles.extend(handles); bundle_sender_actions.extend(actions); } EntryPointVersion::V0_7 => { let (handles, actions) = self - .create_builders_v0_7(ep, Arc::clone(&provider), ep_v0_7.clone()) + .create_builders_v0_7( + ep, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_7.clone(), + ) .await?; sender_handles.extend(handles); bundle_sender_actions.extend(actions); @@ -246,6 +259,7 @@ where &self, ep: &EntryPointBuilderSettings, provider: Arc>, + submit_provider: Option>>, ep_v0_6: E, ) -> anyhow::Result<( Vec>>, @@ -263,6 +277,7 @@ where self.create_bundle_builder( i + ep.bundle_builder_index_offset, Arc::clone(&provider), + submit_provider.clone(), ep_v0_6.clone(), UnsafeSimulator::new( Arc::clone(&provider), @@ -275,6 +290,7 @@ where self.create_bundle_builder( i + ep.bundle_builder_index_offset, Arc::clone(&provider), + submit_provider.clone(), ep_v0_6.clone(), simulation::new_v0_6_simulator( Arc::clone(&provider), @@ -295,6 +311,7 @@ where &self, ep: &EntryPointBuilderSettings, provider: Arc>, + submit_provider: Option>>, ep_v0_7: E, ) -> anyhow::Result<( Vec>>, @@ -312,6 +329,7 @@ where self.create_bundle_builder( i + ep.bundle_builder_index_offset, Arc::clone(&provider), + submit_provider.clone(), ep_v0_7.clone(), UnsafeSimulator::new( Arc::clone(&provider), @@ -324,6 +342,7 @@ where self.create_bundle_builder( i + ep.bundle_builder_index_offset, Arc::clone(&provider), + submit_provider.clone(), ep_v0_7.clone(), simulation::new_v0_7_simulator( Arc::clone(&provider), @@ -344,6 +363,7 @@ where &self, index: u64, provider: Arc>, + submit_provider: Option>>, entry_point: E, simulator: S, ) -> anyhow::Result<( @@ -403,12 +423,8 @@ where bundle_priority_fee_overhead_percent: self.args.bundle_priority_fee_overhead_percent, }; - let submit_provider = rundler_provider::new_provider( - &self.args.submit_url, - Some(self.args.eth_poll_interval), - )?; - let transaction_sender = self.args.sender_args.clone().into_sender( + Arc::clone(&provider), submit_provider, signer, self.args.eth_poll_interval, diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index 775ec0905..a68798ee6 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -369,7 +369,7 @@ where .await .context("tracker should check for dropped transactions")?; Ok(match status { - TxStatus::Pending | TxStatus::Dropped => None, + TxStatus::Pending => None, TxStatus::Mined { block_number } => { let nonce = self.nonce; self.set_nonce_and_clear_state(nonce + 1); @@ -382,11 +382,11 @@ where gas_limit, gas_used, }) - } // TODO(#295): dropped status is often incorrect, for now just assume its still pending - // TxStatus::Dropped => { - // self.has_dropped = true; - // Some(TrackerUpdate::LatestTxDropped { nonce: self.nonce }) - // } + } + TxStatus::Dropped => { + self.has_dropped = true; + Some(TrackerUpdate::LatestTxDropped { nonce: self.nonce }) + } }) } @@ -513,50 +513,44 @@ mod tests { ); } - // TODO(#295): fix dropped status - // #[tokio::test] - // async fn 
test_nonce_and_fees_dropped() { - // let (mut sender, mut provider) = create_base_config(); - // sender.expect_address().return_const(Address::zero()); - - // sender - // .expect_get_transaction_status() - // .returning(move |_a| Box::pin(async { Ok(TxStatus::Dropped) })); + #[tokio::test] + async fn test_nonce_and_fees_dropped() { + let (mut sender, mut provider) = create_base_config(); + sender.expect_address().return_const(Address::zero()); - // sender.expect_send_transaction().returning(move |_a, _b| { - // Box::pin(async { - // Ok(SentTxInfo { - // nonce: U256::from(0), - // tx_hash: H256::zero(), - // }) - // }) - // }); + sender + .expect_get_transaction_status() + .returning(move |_a| Box::pin(async { Ok(TxStatus::Dropped) })); - // provider - // .expect_get_transaction_count() - // .returning(move |_a| Ok(U256::from(0))); + sender.expect_send_transaction().returning(move |_a, _b| { + Box::pin(async { + Ok(SentTxInfo { + nonce: U256::from(0), + tx_hash: H256::zero(), + }) + }) + }); - // provider - // .expect_get_block_number() - // .returning(move || Ok(1)) - // .times(1); + provider + .expect_get_transaction_count() + .returning(move |_a| Ok(U256::from(0))); - // let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; - // let tx = Eip1559TransactionRequest::new() - // .nonce(0) - // .gas(10000) - // .max_fee_per_gas(10000); - // let exp = ExpectedStorage::default(); + let tx = Eip1559TransactionRequest::new() + .nonce(0) + .gas(10000) + .max_fee_per_gas(10000); + let exp = ExpectedStorage::default(); - // // send dummy transaction - // let _sent = tracker.send_transaction(tx.into(), &exp).await; - // let _tracker_update = tracker.wait_for_update().await.unwrap(); + // send dummy transaction + let _sent = tracker.send_transaction(tx.into(), &exp).await; + let _tracker_update = tracker.check_for_update().await.unwrap(); - // let nonce_and_fees = tracker.get_nonce_and_required_fees().unwrap(); + let nonce_and_fees = tracker.get_nonce_and_required_fees().unwrap(); - // assert_eq!((U256::from(0), None), nonce_and_fees); - // } + assert_eq!((U256::from(0), None), nonce_and_fees); + } #[tokio::test] async fn test_send_transaction_without_nonce() { diff --git a/docs/cli.md b/docs/cli.md index 3ac1402b7..1c39b3494 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -180,22 +180,28 @@ List of command line options for configuring the Builder. - *Only required when AWS_KMS_KEY_IDS are provided* - `--builder.max_bundle_size`: Maximum number of ops to include in one bundle (default: `128`) - env: *BUILDER_MAX_BUNDLE_SIZE* -- `--builder.submit_url`: If present, the URL of the ETH provider that will be used to send transactions. Defaults to the value of `node_http`. - - env: *BUILDER_SUBMIT_URL* -- `--builder.sender`: Choice of what sender type to use for transaction submission. 
(default: `raw`, options: `raw`, `conditional`, `flashbots`, `polygon_bloxroute`) - - env: *BUILDER_SENDER* - `--builder.max_blocks_to_wait_for_mine`: After submitting a bundle transaction, the maximum number of blocks to wait for that transaction to mine before trying to resend with higher gas fees (default: `2`) - env: *BUILDER_MAX_BLOCKS_TO_WAIT_FOR_MINE* - `--builder.replacement_fee_percent_increase`: Percentage amount to increase gas fees when retrying a transaction after it failed to mine (default: `10`) - env: *BUILDER_REPLACEMENT_FEE_PERCENT_INCREASE* - `--builder.max_fee_increases`: Maximum number of fee increases to attempt (Seven increases of 10% is roughly 2x the initial fees) (default: `7`) - env: *BUILDER_MAX_FEE_INCREASES* -- `--builder.flashbots_relay_builders`: additional builders to send bundles to through the Flashbots relay RPC (comma-separated). List of builders that the Flashbots RPC supports can be found [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#eth_sendprivatetransaction). (default: `flashbots`) -- `--builder.flashbots_relay_auth_key`: authorization key to use with the flashbots relay. See [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#authentication) for more info. (default: None) +- `--builder.sender`: Choice of what sender type to use for transaction submission. (default: `raw`, options: `raw`, `flashbots`, `polygon_bloxroute`) + - env: *BUILDER_SENDER* +- `--builder.submit_url`: Only used if builder.sender == "raw." If present, the URL of the ETH provider that will be used to send transactions. Defaults to the value of `node_http`. + - env: *BUILDER_SUBMIT_URL* +- `--builder.use_submit_for_status`: Only used if builder.sender == "raw." Use the submit url to get the status of the bundle transaction. (default: `false`) + - env: *BUILDER_USE_SUBMIT_FOR_STATUS* +- `--builder.use_conditional_rpc`: Only used if builder.sender == "raw." Use `eth_sendRawTransactionConditional` when submitting. (default: `false`) + - env: *BUILDER_USE_CONDITIONAL_RPC* +- `--builder.dropped_status_unsupported`: Only used if builder.sender == "raw." If set, the builder will not process a dropped status. Use this if the URL that is being used for status (node_http or submit_url) does not support pending transactions, only those that are mined. (default: `false`) + - env: *BUILDER_DROPPED_STATUS_UNSUPPORTED* +- `--builder.flashbots_relay_builders`: Only used if builder.sender == "flashbots." Additional builders to send bundles to through the Flashbots relay RPC (comma-separated). List of builders that the Flashbots RPC supports can be found [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#eth_sendprivatetransaction). (default: `flashbots`) - env: *BUILDER_FLASHBOTS_RELAY_BUILDERS* -- `--builder.bloxroute_auth_header`: If using the bloxroute transaction sender on Polygon, this is the auth header to supply with the requests. (default: None) +- `--builder.flashbots_relay_auth_key`: Only used/required if builder.sender == "flashbots." Authorization key to use with the flashbots relay. See [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#authentication) for more info. (default: None) + - env: *BUILDER_FLASHBOTS_RELAY_AUTH_KEY* +- `--builder.bloxroute_auth_header`: Only used/required if builder.sender == "polygon_bloxroute." If using the bloxroute transaction sender on Polygon, this is the auth header to supply with the requests. 
(default: None) - env: `BUILDER_BLOXROUTE_AUTH_HEADER` - - *Only required when `--builder.sender=polygon_bloxroute`* - `--builder.index_offset`: If running multiple builder processes, this is the index offset to assign unique indexes to each bundle sender. (default: 0) - env: `BUILDER_INDEX_OFFSET` - `--builder.pool_url`: If running in distributed mode, the URL of the pool server to use. @@ -216,11 +222,11 @@ Here are some example commands to use the CLI: ```sh # Run the Node subcommand with custom options -$ ./rundler node --entry_points 0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789 --chain_id 1337 --max_verification_gas 10000000 +$ ./rundler node --chain_id 1337 --max_verification_gas 10000000 --disable_entry_point_v0_6 # Run the RPC subcommand with custom options and enable JSON logging. The builder and pool will need to be running before this starts. -$ ./rundler rpc --node_http http://localhost:8545 --log.json +$ ./rundler rpc --node_http http://localhost:8545 --log.json --disable_entry_point_v0_6 # Run the Pool subcommand with custom options and specify a mempool config file -$ ./rundler pool --max_simulate_handle_ops_gas 15000000 --mempool_config_path mempool.json --node_http http://localhost:8545 --chain_id 8453 +$ ./rundler pool --max_simulate_handle_ops_gas 15000000 --mempool_config_path mempool.json --node_http http://localhost:8545 --chain_id 8453 --disable_entry_point_v0_6 ``` From 394ff683666038a64adc65a9e50600e173d1ae92 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Thu, 13 Jun 2024 17:15:09 -0500 Subject: [PATCH 08/17] feat(builder): reject ops if condition not met after failure --- crates/builder/src/bundle_proposer.rs | 94 ++++++++++++++++++- crates/builder/src/bundle_sender.rs | 27 +++++- crates/builder/src/emit.rs | 11 +++ crates/builder/src/sender/mod.rs | 11 +++ crates/builder/src/transaction_tracker.rs | 3 + crates/provider/src/ethers/provider.rs | 46 ++++++++- crates/provider/src/traits/provider.rs | 7 ++ crates/types/build.rs | 1 + crates/types/contracts/foundry.toml | 2 +- .../contracts/src/utils/StorageLoader.sol | 17 ++++ 10 files changed, 209 insertions(+), 10 deletions(-) create mode 100644 crates/types/contracts/src/utils/StorageLoader.sol diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index d006a395f..dccf22a3a 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -46,7 +46,7 @@ use rundler_utils::{emit::WithEntryPoint, math}; use tokio::{sync::broadcast, try_join}; use tracing::{error, info, warn}; -use crate::emit::{BuilderEvent, OpRejectionReason, SkipReason}; +use crate::emit::{BuilderEvent, ConditionNotMetReason, OpRejectionReason, SkipReason}; /// Extra buffer percent to add on the bundle transaction gas estimate to be sure it will be enough const BUNDLE_TRANSACTION_GAS_OVERHEAD_PERCENT: u64 = 5; @@ -101,7 +101,7 @@ pub(crate) trait BundleProposer: Send + Sync + 'static { /// If `min_fees` is `Some`, the proposer will ensure the bundle has /// at least `min_fees`. async fn make_bundle( - &self, + &mut self, min_fees: Option, is_replacement: bool, ) -> anyhow::Result>; @@ -111,6 +111,9 @@ pub(crate) trait BundleProposer: Send + Sync + 'static { /// If `min_fees` is `Some`, the proposer will ensure the gas fees returned are at least `min_fees`. 
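The shape of the new feedback loop, in miniature (illustrative types only; the real pieces are the `notify_condition_not_met` hook introduced below, the `condition_not_met_notified` flag on the proposer, and the `ConditionNotMet` handling added to the bundle sender):

```rust
// Simplified model of the commit's feedback loop, not the crate's actual types.
#[derive(Debug, PartialEq)]
enum SubmitOutcome {
    Success,
    ConditionNotMet,
}

struct Proposer {
    condition_not_met_notified: bool,
}

impl Proposer {
    fn notify_condition_not_met(&mut self) {
        self.condition_not_met_notified = true;
    }

    fn make_bundle(&mut self) -> Vec<&'static str> {
        let mut ops = vec!["op_a", "op_b"];
        if self.condition_not_met_notified {
            self.condition_not_met_notified = false;
            // Re-check each op's expected storage and drop the ones that no longer hold.
            ops.retain(|op| *op != "op_b");
        }
        ops
    }
}

fn main() {
    let mut proposer = Proposer { condition_not_met_notified: false };
    assert_eq!(proposer.make_bundle().len(), 2);

    // The sender hits "storage slot value condition not met" on submission...
    let outcome = SubmitOutcome::ConditionNotMet;
    if outcome == SubmitOutcome::ConditionNotMet {
        proposer.notify_condition_not_met();
    }

    // ...so the next bundle re-validates conditions and rejects the stale op.
    assert_eq!(proposer.make_bundle(), vec!["op_a"]);
}
```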
async fn estimate_gas_fees(&self, min_fees: Option) -> anyhow::Result<(GasFees, U256)>; + + /// Notifies the proposer that a condition was not met during the last bundle proposal + fn notify_condition_not_met(&mut self); } #[derive(Debug)] @@ -123,6 +126,7 @@ pub(crate) struct BundleProposerImpl { settings: Settings, fee_estimator: FeeEstimator

, event_sender: broadcast::Sender>, + condition_not_met_notified: bool, _uo_type: PhantomData, } @@ -155,8 +159,12 @@ where self.fee_estimator.required_bundle_fees(required_fees).await } + fn notify_condition_not_met(&mut self) { + self.condition_not_met_notified = true; + } + async fn make_bundle( - &self, + &mut self, required_fees: Option, is_replacement: bool, ) -> anyhow::Result> { @@ -238,6 +246,16 @@ where gas_estimate ); + // If recently notified that a bundle condition was not met, check each of + // the conditions again to ensure if they are met, rejecting OPs if they are not. + if self.condition_not_met_notified { + self.condition_not_met_notified = false; + self.check_conditions_met(&mut context).await?; + if context.is_empty() { + break; + } + } + let mut expected_storage = ExpectedStorage::default(); for op in context.iter_ops_with_simulations() { expected_storage.merge(&op.simulation.expected_storage)?; @@ -295,6 +313,7 @@ where ), settings, event_sender, + condition_not_met_notified: false, _uo_type: PhantomData, } } @@ -549,6 +568,73 @@ where context } + async fn check_conditions_met(&self, context: &mut ProposalContext) -> anyhow::Result<()> { + let futs = context + .iter_ops_with_simulations() + .enumerate() + .map(|(i, op)| async move { + self.check_op_conditions_met(&op.simulation.expected_storage) + .await + .map(|reason| (i, reason)) + }) + .collect::>(); + + let to_reject = future::join_all(futs).await.into_iter().flatten(); + + for (index, reason) in to_reject { + self.emit(BuilderEvent::rejected_op( + self.builder_index, + self.op_hash(&context.get_op_at(index)?.op), + OpRejectionReason::ConditionNotMet(reason), + )); + self.reject_index(context, index).await; + } + + Ok(()) + } + + async fn check_op_conditions_met( + &self, + expected_storage: &ExpectedStorage, + ) -> Option { + let futs = expected_storage + .0 + .iter() + .map(|(address, slots)| async move { + let storage = match self + .provider + .batch_get_storage_at(*address, slots.keys().copied().collect()) + .await + { + Ok(storage) => storage, + Err(e) => { + error!("Error getting storage for address {address:?} failing open: {e:?}"); + return None; + } + }; + + for ((slot, expected), actual) in slots.iter().zip(storage) { + if *expected != actual { + return Some(ConditionNotMetReason { + address: *address, + slot: *slot, + expected: *expected, + actual, + }); + } + } + None + }); + + let results = future::join_all(futs).await; + for result in results { + if result.is_some() { + return result; + } + } + None + } + async fn reject_index(&self, context: &mut ProposalContext, i: usize) { let changed_aggregator = context.reject_index(i); self.compute_aggregator_signatures(context, &changed_aggregator) @@ -2154,7 +2240,7 @@ mod tests { .expect_aggregate_signatures() .returning(move |address, _| Ok(signatures_by_aggregator[&address]().unwrap())); let (event_sender, _) = broadcast::channel(16); - let proposer = BundleProposerImpl::new( + let mut proposer = BundleProposerImpl::new( 0, pool_client, simulator, diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index 8a7a37af2..d3a2f7be7 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -104,6 +104,8 @@ enum SendBundleAttemptResult { NoOperations, // Replacement Underpriced ReplacementUnderpriced, + // Condition not met + ConditionNotMet, // Nonce too low NonceTooLow, } @@ -255,6 +257,11 @@ where info!("Replacement transaction underpriced, entering cancellation loop"); 
state.update(InnerState::Cancelling(inner.to_cancelling())); } + Ok(SendBundleAttemptResult::ConditionNotMet) => { + info!("Condition not met, notifying proposer and starting new bundle attempt"); + self.proposer.notify_condition_not_met(); + state.reset(); + } Err(error) => { error!("Bundle send error {error:?}"); self.metrics.increment_bundle_txns_failed(); @@ -487,14 +494,20 @@ where Ok(SendBundleAttemptResult::Success) } Err(TransactionTrackerError::NonceTooLow) => { - warn!("Replacement transaction underpriced"); + self.metrics.increment_bundle_txn_nonce_too_low(); + warn!("Bundle attempt nonce too low"); Ok(SendBundleAttemptResult::NonceTooLow) } Err(TransactionTrackerError::ReplacementUnderpriced) => { self.metrics.increment_bundle_txn_replacement_underpriced(); - warn!("Replacement transaction underpriced"); + warn!("Bundle attempt replacement transaction underpriced"); Ok(SendBundleAttemptResult::ReplacementUnderpriced) } + Err(TransactionTrackerError::ConditionNotMet) => { + self.metrics.increment_bundle_txn_condition_not_met(); + warn!("Bundle attempt condition not met"); + Ok(SendBundleAttemptResult::ConditionNotMet) + } Err(e) => { error!("Failed to send bundle with unexpected error: {e:?}"); Err(e.into()) @@ -505,7 +518,7 @@ where /// Builds a bundle and returns some metadata and the transaction to send /// it, or `None` if there are no valid operations available. async fn get_bundle_tx( - &self, + &mut self, nonce: U256, required_fees: Option, is_replacement: bool, @@ -964,6 +977,14 @@ impl BuilderMetrics { metrics::counter!("builder_bundle_replacement_underpriced", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } + fn increment_bundle_txn_nonce_too_low(&self) { + metrics::counter!("builder_bundle_nonce_too_low", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_condition_not_met(&self) { + metrics::counter!("builder_bundle_condition_not_met", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + fn increment_cancellation_txns_sent(&self) { metrics::counter!("builder_cancellation_txns_sent", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } diff --git a/crates/builder/src/emit.rs b/crates/builder/src/emit.rs index e5c70cd36..66de76f01 100644 --- a/crates/builder/src/emit.rs +++ b/crates/builder/src/emit.rs @@ -196,6 +196,17 @@ pub enum OpRejectionReason { FailedRevalidation { error: SimulationError }, /// Operation reverted during bundle formation simulation with message FailedInBundle { message: Arc }, + /// Operation's storage slot condition was not met + ConditionNotMet(ConditionNotMetReason), +} + +/// Reason for a condition not being met +#[derive(Clone, Debug)] +pub struct ConditionNotMetReason { + pub address: Address, + pub slot: H256, + pub expected: H256, + pub actual: H256, } impl Display for BuilderEvent { diff --git a/crates/builder/src/sender/mod.rs b/crates/builder/src/sender/mod.rs index d2bb50fed..122640c78 100644 --- a/crates/builder/src/sender/mod.rs +++ b/crates/builder/src/sender/mod.rs @@ -66,6 +66,9 @@ pub(crate) enum TxSenderError { /// Nonce too low #[error("nonce too low")] NonceTooLow, + /// Conditional value not met + #[error("storage slot value condition not met")] + ConditionNotMet, /// Soft cancellation failed #[error("soft cancel failed")] SoftCancelFailed, @@ 
-292,6 +295,14 @@ impl From for TxSenderError { return TxSenderError::ReplacementUnderpriced; } else if e.message.contains("nonce too low") { return TxSenderError::NonceTooLow; + // Arbitrum conditional sender error message + // TODO push them to use a specific error code and to return the specific slot that is not met. + } else if e + .message + .to_lowercase() + .contains("storage slot value condition not met") + { + return TxSenderError::ConditionNotMet; } } TxSenderError::Other(value.into()) diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index a68798ee6..cc0fff660 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -77,6 +77,8 @@ pub(crate) enum TransactionTrackerError { NonceTooLow, #[error("replacement transaction underpriced")] ReplacementUnderpriced, + #[error("storage slot value condition not met")] + ConditionNotMet, /// All other errors #[error(transparent)] Other(#[from] anyhow::Error), @@ -403,6 +405,7 @@ impl From for TransactionTrackerError { TxSenderError::ReplacementUnderpriced => { TransactionTrackerError::ReplacementUnderpriced } + TxSenderError::ConditionNotMet => TransactionTrackerError::ConditionNotMet, TxSenderError::SoftCancelFailed => { TransactionTrackerError::Other(anyhow::anyhow!("soft cancel failed")) } diff --git a/crates/provider/src/ethers/provider.rs b/crates/provider/src/ethers/provider.rs index 739d7ad3f..baa757249 100644 --- a/crates/provider/src/ethers/provider.rs +++ b/crates/provider/src/ethers/provider.rs @@ -29,8 +29,9 @@ use ethers::{ }, }; use reqwest::Url; -use rundler_types::contracts::utils::get_gas_used::{ - GasUsedResult, GetGasUsed, GETGASUSED_DEPLOYED_BYTECODE, +use rundler_types::contracts::utils::{ + get_gas_used::{GasUsedResult, GetGasUsed, GETGASUSED_DEPLOYED_BYTECODE}, + storage_loader::STORAGELOADER_DEPLOYED_BYTECODE, }; use serde::{de::DeserializeOwned, Serialize}; @@ -207,6 +208,47 @@ impl Provider for EthersProvider { .await .context("should get gas used")?) 
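+ // A minimal usage sketch of the batch storage read added below (illustrative only; the
+ // variable names are hypothetical):
+ //   let slots = vec![H256::zero(), H256::from_low_u64_be(1)];
+ //   let words = provider.batch_get_storage_at(target, slots).await?; // one H256 per slot
+ // The implementation overrides `target`'s code with STORAGELOADER_DEPLOYED_BYTECODE via
+ // eth_call state overrides, packs the slot keys into calldata, and splits the returned
+ // bytes into 32-byte words.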
} + + async fn batch_get_storage_at( + &self, + address: Address, + slots: Vec, + ) -> ProviderResult> { + let mut state_overrides = spoof::State::default(); + state_overrides + .account(address) + .code(STORAGELOADER_DEPLOYED_BYTECODE.clone()); + + let expected_ret_size = slots.len() * 32; + let slot_data = slots + .into_iter() + .flat_map(|slot| slot.to_fixed_bytes()) + .collect::>(); + + let tx: TypedTransaction = Eip1559TransactionRequest { + to: Some(address.into()), + data: Some(slot_data.into()), + ..Default::default() + } + .into(); + + let result_bytes = self + .call_raw(&tx) + .state(&state_overrides) + .await + .context("should call storage loader")?; + + if result_bytes.len() != expected_ret_size { + return Err(anyhow::anyhow!( + "expected {} bytes, got {}", + expected_ret_size, + result_bytes.len() + ) + .into()); + } + + Ok(result_bytes.chunks(32).map(H256::from_slice).collect()) + } } impl From for ProviderError { diff --git a/crates/provider/src/traits/provider.rs b/crates/provider/src/traits/provider.rs index 86a313c8e..0aac930b1 100644 --- a/crates/provider/src/traits/provider.rs +++ b/crates/provider/src/traits/provider.rs @@ -137,4 +137,11 @@ pub trait Provider: Send + Sync + Debug + 'static { data: Bytes, state_overrides: spoof::State, ) -> ProviderResult; + + /// Get the storage values at a given address and slots + async fn batch_get_storage_at( + &self, + address: Address, + slots: Vec, + ) -> ProviderResult>; } diff --git a/crates/types/build.rs b/crates/types/build.rs index 6bbb22fa4..f69d11246 100644 --- a/crates/types/build.rs +++ b/crates/types/build.rs @@ -88,6 +88,7 @@ fn generate_utils_bindings() -> Result<(), Box> { MultiAbigen::from_abigens([ abigen_of("utils", "GetCodeHashes")?, abigen_of("utils", "GetGasUsed")?, + abigen_of("utils", "StorageLoader")?, ]) .build()? 
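+ // The generated StorageLoader bindings are expected to expose STORAGELOADER_DEPLOYED_BYTECODE,
+ // which the ethers provider uses as a code override when batch-reading storage slots.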
.write_to_module("src/contracts/utils", false)?; diff --git a/crates/types/contracts/foundry.toml b/crates/types/contracts/foundry.toml index 0705faad6..ea3c24180 100644 --- a/crates/types/contracts/foundry.toml +++ b/crates/types/contracts/foundry.toml @@ -4,7 +4,7 @@ out = 'out' libs = ['lib'] test = 'test' cache_path = 'cache' -solc_version = '0.8.23' +solc_version = '0.8.26' remappings = [ 'forge-std/=lib/forge-std/src', diff --git a/crates/types/contracts/src/utils/StorageLoader.sol b/crates/types/contracts/src/utils/StorageLoader.sol new file mode 100644 index 000000000..1cdab1d1f --- /dev/null +++ b/crates/types/contracts/src/utils/StorageLoader.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.25; + +contract StorageLoader { + fallback() external payable { + assembly { + let cursor := 0 + + for {} lt(cursor, calldatasize()) {cursor := add(cursor, 0x20)} { + let slot := calldataload(cursor) + mstore(cursor, sload(slot)) + } + + return(0, cursor) + } + } +} From ed74764ac8ac0043de904ecce84faaf3a2f185ac Mon Sep 17 00:00:00 2001 From: dancoombs Date: Fri, 14 Jun 2024 10:40:34 -0500 Subject: [PATCH 09/17] feat(builder): allow for multiple private keys --- bin/rundler/src/cli/builder.rs | 47 ++++++++++++++++++++++++++------ crates/builder/src/sender/mod.rs | 36 +++++++++--------------- crates/builder/src/task.rs | 25 +++++++++++++---- docs/cli.md | 5 +++- 4 files changed, 74 insertions(+), 39 deletions(-) diff --git a/bin/rundler/src/cli/builder.rs b/bin/rundler/src/cli/builder.rs index 94f9b83c4..3ad87f04e 100644 --- a/bin/rundler/src/cli/builder.rs +++ b/bin/rundler/src/cli/builder.rs @@ -13,7 +13,7 @@ use std::{net::SocketAddr, time::Duration}; -use anyhow::Context; +use anyhow::{bail, Context}; use clap::Args; use rundler_builder::{ self, BloxrouteSenderArgs, BuilderEvent, BuilderEventKind, BuilderTask, BuilderTaskArgs, @@ -57,6 +57,10 @@ pub struct BuilderArgs { host: String, /// Private key to use for signing transactions + /// DEPRECATED: Use `builder.private_keys` instead + /// + /// If both `builder.private_key` and `builder.private_keys` are set, `builder.private_key` is appended + /// to `builder.private_keys`. Keys must be unique. #[arg( long = "builder.private_key", name = "builder.private_key", @@ -64,6 +68,17 @@ pub struct BuilderArgs { )] private_key: Option, + /// Private keys to use for signing transactions + /// + /// Cannot use both `builder.private_key` and `builder.aws_kms_key_ids` at the same time. + #[arg( + long = "builder.private_keys", + name = "builder.private_keys", + env = "BUILDER_PRIVATE_KEYS", + value_delimiter = ',' + )] + private_keys: Vec, + /// AWS KMS key IDs to use for signing transactions #[arg( long = "builder.aws_kms_key_ids", @@ -284,17 +299,31 @@ impl BuilderArgs { num_builders += common.num_builders_v0_7; } - if self.private_key.is_some() { - if num_builders > 1 { - return Err(anyhow::anyhow!( - "Cannot use a private key with multiple builders. You may need to disable one of the entry points." - )); + if (self.private_key.is_some() || !self.private_keys.is_empty()) + && !self.aws_kms_key_ids.is_empty() + { + bail!( + "Cannot use both builder.private_key(s) and builder.aws_kms_key_ids at the same time." 
+ ); + } + + let mut private_keys = self.private_keys.clone(); + if self.private_key.is_some() || !self.private_keys.is_empty() { + if let Some(pk) = &self.private_key { + private_keys.push(pk.clone()); + } + + if num_builders > private_keys.len() as u64 { + bail!( + "Found {} private keys, but need {} keys for the number of builders. You may need to disable one of the entry points.", + private_keys.len(), num_builders + ); } } else if self.aws_kms_key_ids.len() < num_builders as usize { - return Err(anyhow::anyhow!( + bail!( "Not enough AWS KMS key IDs for the number of builders. Need {} keys, found {}. You may need to disable one of the entry points.", num_builders, self.aws_kms_key_ids.len() - )); + ); } let sender_args = self.sender_args(&chain_spec, &rpc_url)?; @@ -304,7 +333,7 @@ impl BuilderArgs { chain_spec, unsafe_mode: common.unsafe_mode, rpc_url, - private_key: self.private_key.clone(), + private_keys, aws_kms_key_ids: self.aws_kms_key_ids.clone(), aws_kms_region: common .aws_region diff --git a/crates/builder/src/sender/mod.rs b/crates/builder/src/sender/mod.rs index 122640c78..fcc0ed70d 100644 --- a/crates/builder/src/sender/mod.rs +++ b/crates/builder/src/sender/mod.rs @@ -183,33 +183,23 @@ impl TransactionSenderArgs { { let sender = match self { Self::Raw(args) => { - if let Some(submit_provider) = submit_provider { + let (provider, submitter) = if let Some(submit_provider) = submit_provider { if args.use_submit_for_status { - TransactionSenderEnum::Raw(RawTransactionSender::new( - Arc::clone(&submit_provider), - submit_provider, - signer, - args.dropped_status_supported, - args.use_conditional_rpc, - )) + (Arc::clone(&submit_provider), submit_provider) } else { - TransactionSenderEnum::Raw(RawTransactionSender::new( - rpc_provider, - submit_provider, - signer, - args.dropped_status_supported, - args.use_conditional_rpc, - )) + (rpc_provider, submit_provider) } } else { - TransactionSenderEnum::Raw(RawTransactionSender::new( - Arc::clone(&rpc_provider), - rpc_provider, - signer, - args.dropped_status_supported, - args.use_conditional_rpc, - )) - } + (Arc::clone(&rpc_provider), rpc_provider) + }; + + TransactionSenderEnum::Raw(RawTransactionSender::new( + provider, + submitter, + signer, + args.dropped_status_supported, + args.use_conditional_rpc, + )) } Self::Flashbots(args) => { let flashbots_signer = args.auth_key.parse().context("should parse auth key")?; diff --git a/crates/builder/src/task.rs b/crates/builder/src/task.rs index 36e05c011..933cc35a0 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -62,8 +62,8 @@ pub struct Args { /// True if using unsafe mode pub unsafe_mode: bool, /// Private key to use for signing transactions - /// If not provided, AWS KMS will be used - pub private_key: Option, + /// If empty, AWS KMS will be used + pub private_keys: Vec, /// AWS KMS key ids to use for signing transactions /// Only used if private_key is not provided pub aws_kms_key_ids: Vec, @@ -152,6 +152,7 @@ where let mut sender_handles = vec![]; let mut bundle_sender_actions = vec![]; + let mut pk_iter = self.args.private_keys.clone().into_iter(); for ep in &self.args.entry_points { match ep.version { @@ -162,6 +163,7 @@ where Arc::clone(&provider), submit_provider.clone(), ep_v0_6.clone(), + &mut pk_iter, ) .await?; sender_handles.extend(handles); @@ -174,6 +176,7 @@ where Arc::clone(&provider), submit_provider.clone(), ep_v0_7.clone(), + &mut pk_iter, ) .await?; sender_handles.extend(handles); @@ -255,12 +258,13 @@ where Box::new(self) } - async fn 
create_builders_v0_6( + async fn create_builders_v0_6( &self, ep: &EntryPointBuilderSettings, provider: Arc>, submit_provider: Option>>, ep_v0_6: E, + pk_iter: &mut I, ) -> anyhow::Result<( Vec>>, Vec>, @@ -268,6 +272,7 @@ where C: JsonRpcClient + 'static, E: EntryPointProvider + Clone, + I: Iterator, { info!("Mempool config for ep v0.6: {:?}", ep.mempool_configs); let mut sender_handles = vec![]; @@ -284,6 +289,7 @@ where ep_v0_6.clone(), self.args.sim_settings.clone(), ), + pk_iter, ) .await? } else { @@ -298,6 +304,7 @@ where self.args.sim_settings.clone(), ep.mempool_configs.clone(), ), + pk_iter, ) .await? }; @@ -307,12 +314,13 @@ where Ok((sender_handles, bundle_sender_actions)) } - async fn create_builders_v0_7( + async fn create_builders_v0_7( &self, ep: &EntryPointBuilderSettings, provider: Arc>, submit_provider: Option>>, ep_v0_7: E, + pk_iter: &mut I, ) -> anyhow::Result<( Vec>>, Vec>, @@ -320,6 +328,7 @@ where C: JsonRpcClient + 'static, E: EntryPointProvider + Clone, + I: Iterator, { info!("Mempool config for ep v0.7: {:?}", ep.mempool_configs); let mut sender_handles = vec![]; @@ -336,6 +345,7 @@ where ep_v0_7.clone(), self.args.sim_settings.clone(), ), + pk_iter, ) .await? } else { @@ -350,6 +360,7 @@ where self.args.sim_settings.clone(), ep.mempool_configs.clone(), ), + pk_iter, ) .await? }; @@ -359,13 +370,14 @@ where Ok((sender_handles, bundle_sender_actions)) } - async fn create_bundle_builder( + async fn create_bundle_builder( &self, index: u64, provider: Arc>, submit_provider: Option>>, entry_point: E, simulator: S, + pk_iter: &mut I, ) -> anyhow::Result<( JoinHandle>, mpsc::Sender, @@ -376,10 +388,11 @@ where E: EntryPointProvider + Clone, S: Simulator, C: JsonRpcClient + 'static, + I: Iterator, { let (send_bundle_tx, send_bundle_rx) = mpsc::channel(1); - let signer = if let Some(pk) = &self.args.private_key { + let signer = if let Some(pk) = pk_iter.next() { info!("Using local signer"); BundlerSigner::Local( LocalSigner::connect( diff --git a/docs/cli.md b/docs/cli.md index 1c39b3494..ff26b0216 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -168,10 +168,13 @@ List of command line options for configuring the Builder. - *Only required when running in distributed mode* - `--builder.private_key`: Private key to use for signing transactions - env: *BUILDER_PRIVATE_KEY* - - *Always used if provided. If not provided builder.aws_kms_key_ids is used* + - **DEPRECATED**: Use `--builder.private_keys` instead. If both are provided, this key is appended to the `--builder.private_keys` list.
+- `--builder.private_keys`: Private keys to use for signing transactions, separated by `,` + - env: *BUILDER_PRIVATE_KEYS* - `--builder.aws_kms_key_ids`: AWS KMS key IDs to use for signing transactions (comma-separated) - env: *BUILDER_AWS_KMS_KEY_IDS* - *Only required if BUILDER_PRIVATE_KEY is not provided* + - *Cannot use `builder.private_keys` and `builder.aws_kms_key_ids` at the same time* - `--builder.redis_uri`: Redis URI to use for KMS leasing (default: `""`) - env: *BUILDER_REDIS_URI* - *Only required when AWS_KMS_KEY_IDS are provided* From 3de6345fa335e987a280ad578d0ae8393391ba08 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Tue, 18 Jun 2024 22:03:00 -0500 Subject: [PATCH 10/17] chore(builder): add unit tests to bundle_sender --- crates/builder/src/bundle_proposer.rs | 111 +++- crates/builder/src/bundle_sender.rs | 697 +++++++++++++++++++--- crates/builder/src/transaction_tracker.rs | 3 + 3 files changed, 741 insertions(+), 70 deletions(-) diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index dccf22a3a..17999cfcc 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -91,8 +91,8 @@ impl Bundle { } } -#[cfg_attr(test, automock(type UO = rundler_types::v0_6::UserOperation;))] #[async_trait] +#[cfg_attr(test, automock(type UO = rundler_types::v0_6::UserOperation;))] pub(crate) trait BundleProposer: Send + Sync + 'static { type UO: UserOperation; @@ -1579,6 +1579,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1613,6 +1615,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1655,6 +1659,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1746,6 +1752,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; // Ops should be grouped by aggregator. 
Further, the `signature` field @@ -1834,6 +1842,8 @@ mod tests { vec![deposit, deposit, deposit], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -1895,6 +1905,8 @@ mod tests { vec![deposit, deposit, deposit], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -2024,6 +2036,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -2056,6 +2070,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -2122,6 +2138,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -2138,6 +2156,76 @@ mod tests { ); } + #[tokio::test] + async fn test_condition_not_met_match() { + let op = default_op(); + + let mut expected_storage = ExpectedStorage::default(); + expected_storage.insert(address(1), U256::zero(), U256::zero()); + let actual_storage = expected_storage.clone(); + + let bundle = mock_make_bundle( + vec![MockOp { + op: op.clone(), + simulation_result: Box::new(move || { + Ok(SimulationResult { + expected_storage: expected_storage.clone(), + ..Default::default() + }) + }), + }], + vec![], + vec![HandleOpsOut::Success], + vec![], + U256::zero(), + U256::zero(), + true, + actual_storage, + ) + .await; + + assert_eq!( + bundle.ops_per_aggregator, + vec![UserOpsPerAggregator { + user_ops: vec![op], + ..Default::default() + }] + ); + } + + #[tokio::test] + async fn test_condition_not_met_mismatch() { + let op = default_op(); + + let mut expected_storage = ExpectedStorage::default(); + expected_storage.insert(address(1), U256::zero(), U256::zero()); + let mut actual_storage = ExpectedStorage::default(); + actual_storage.insert(address(1), U256::zero(), U256::from(1)); + + let bundle = mock_make_bundle( + vec![MockOp { + op: op.clone(), + simulation_result: Box::new(move || { + Ok(SimulationResult { + expected_storage: expected_storage.clone(), + ..Default::default() + }) + }), + }], + vec![], + vec![HandleOpsOut::Success], + vec![], + U256::zero(), + U256::zero(), + true, + actual_storage, + ) + .await; + + assert!(bundle.ops_per_aggregator.is_empty()); + assert_eq!(bundle.rejected_ops, vec![op]); + } + struct MockOp { op: UserOperation, simulation_result: Box Result + Send + Sync>, @@ -2156,10 +2244,13 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await } + #[allow(clippy::too_many_arguments)] async fn mock_make_bundle( mock_ops: Vec, mock_aggregators: Vec, @@ -2167,6 +2258,8 @@ mod tests { mock_paymaster_deposits: Vec, base_fee: U256, max_priority_fee_per_gas: U256, + notify_condition_not_met: bool, + actual_storage: ExpectedStorage, ) -> Bundle { let entry_point_address = address(123); let beneficiary = address(124); @@ -2226,6 +2319,7 @@ mod tests { .into_iter() .map(|agg| (agg.address, agg.signature)) .collect(); + let mut provider = MockProvider::new(); provider .expect_get_latest_block_hash_and_number() @@ -2236,6 +2330,16 @@ mod tests { provider .expect_get_max_priority_fee() .returning(move || Ok(max_priority_fee_per_gas)); + if notify_condition_not_met { + for (addr, slots) in actual_storage.0.into_iter() { + let values = slots.values().cloned().collect::>(); + provider + .expect_batch_get_storage_at() + .withf(move |a, s| *a == addr && s.iter().all(|slot| slots.contains_key(slot))) + .returning(move |_, _| Ok(values.clone())); + } + } + entry_point .expect_aggregate_signatures() .returning(move |address, _| 
Ok(signatures_by_aggregator[&address]().unwrap())); @@ -2256,6 +2360,11 @@ mod tests { }, event_sender, ); + + if notify_condition_not_met { + proposer.notify_condition_not_met(); + } + proposer .make_bundle(None, false) .await diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index d3a2f7be7..4c088dbb0 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -17,6 +17,8 @@ use anyhow::{bail, Context}; use async_trait::async_trait; use ethers::types::{transaction::eip2718::TypedTransaction, Address, H256, U256}; use futures_util::StreamExt; +#[cfg(test)] +use mockall::automock; use rundler_provider::{BundleHandler, EntryPoint}; use rundler_sim::ExpectedStorage; use rundler_types::{ @@ -186,7 +188,10 @@ where } } - async fn step_state(&mut self, state: &mut SenderMachineState) -> anyhow::Result<()> { + async fn step_state( + &mut self, + state: &mut SenderMachineState, + ) -> anyhow::Result<()> { let tracker_update = state.wait_for_trigger().await?; match state.inner { @@ -210,9 +215,9 @@ where Ok(()) } - async fn handle_building_state( + async fn handle_building_state( &mut self, - state: &mut SenderMachineState, + state: &mut SenderMachineState, inner: BuildingState, ) -> anyhow::Result<()> { // send bundle @@ -273,9 +278,9 @@ where Ok(()) } - async fn handle_pending_state( + async fn handle_pending_state( &mut self, - state: &mut SenderMachineState, + state: &mut SenderMachineState, inner: PendingState, tracker_update: Option, ) -> anyhow::Result<()> { @@ -342,9 +347,9 @@ where Ok(()) } - async fn handle_cancelling_state( + async fn handle_cancelling_state( &mut self, - state: &mut SenderMachineState, + state: &mut SenderMachineState, inner: CancellingState, ) -> anyhow::Result<()> { info!("Cancelling last transaction"); @@ -393,9 +398,9 @@ where Ok(()) } - async fn handle_cancel_pending_state( + async fn handle_cancel_pending_state( &mut self, - state: &mut SenderMachineState, + state: &mut SenderMachineState, inner: CancelPendingState, tracker_update: Option, ) -> anyhow::Result<()> { @@ -444,9 +449,9 @@ where /// - There are no ops available to bundle initially. /// - The gas fees are high enough that the bundle is empty because there /// are no ops that meet the fee requirements. 
- async fn send_bundle( + async fn send_bundle( &mut self, - state: &mut SenderMachineState, + state: &mut SenderMachineState, fee_increase_count: u64, ) -> anyhow::Result { let (nonce, required_fees) = state.transaction_tracker.get_nonce_and_required_fees()?; @@ -528,19 +533,31 @@ where .make_bundle(required_fees, is_replacement) .await .context("proposer should create bundle for builder")?; + let remove_ops_future = async { + if bundle.rejected_ops.is_empty() { + return; + } + let result = self.remove_ops_from_pool(&bundle.rejected_ops).await; if let Err(error) = result { error!("Failed to remove rejected ops from pool: {error}"); } }; + let update_entities_future = async { + if bundle.entity_updates.is_empty() { + return; + } + let result = self.update_entities_in_pool(&bundle.entity_updates).await; if let Err(error) = result { error!("Failed to update entities in pool: {error}"); } }; + join!(remove_ops_future, update_entities_future); + if bundle.is_empty() { if !bundle.rejected_ops.is_empty() || !bundle.entity_updates.is_empty() { info!( @@ -603,16 +620,16 @@ where } } -struct SenderMachineState { - trigger: BundleSenderTrigger, +struct SenderMachineState { + trigger: TRIG, transaction_tracker: T, send_bundle_response: Option>, inner: InnerState, requires_reset: bool, } -impl SenderMachineState { - fn new(trigger: BundleSenderTrigger, transaction_tracker: T) -> Self { +impl SenderMachineState { + fn new(trigger: TRIG, transaction_tracker: T) -> Self { Self { trigger, transaction_tracker, @@ -732,7 +749,7 @@ struct PendingState { impl PendingState { fn to_building(self) -> BuildingState { BuildingState { - wait_for_trigger: true, + wait_for_trigger: false, fee_increase_count: self.fee_increase_count + 1, } } @@ -771,6 +788,18 @@ impl CancelPendingState { } } +#[async_trait] +#[cfg_attr(test, automock)] +trait Trigger { + async fn wait_for_trigger( + &mut self, + ) -> anyhow::Result>>; + + async fn wait_for_block(&mut self) -> anyhow::Result; + + fn last_block(&self) -> &NewHead; +} + struct BundleSenderTrigger { bundling_mode: BundlingMode, block_rx: UnboundedReceiver, @@ -779,55 +808,8 @@ struct BundleSenderTrigger { last_block: NewHead, } -impl BundleSenderTrigger { - async fn new( - pool_client: &P, - bundle_action_receiver: mpsc::Receiver, - timer_interval: Duration, - ) -> anyhow::Result { - let block_rx = Self::start_block_stream(pool_client).await?; - - Ok(Self { - bundling_mode: BundlingMode::Auto, - block_rx, - bundle_action_receiver, - timer: tokio::time::interval(timer_interval), - last_block: NewHead { - block_hash: H256::zero(), - block_number: 0, - }, - }) - } - - async fn start_block_stream( - pool_client: &P, - ) -> anyhow::Result> { - let Ok(mut new_heads) = pool_client.subscribe_new_heads().await else { - error!("Failed to subscribe to new blocks"); - bail!("failed to subscribe to new blocks"); - }; - - let (tx, rx) = mpsc::unbounded_channel(); - tokio::spawn(async move { - loop { - match new_heads.next().await { - Some(b) => { - if tx.send(b).is_err() { - error!("Failed to buffer new block for bundle sender"); - return; - } - } - None => { - error!("Block stream ended"); - return; - } - } - } - }); - - Ok(rx) - } - +#[async_trait] +impl Trigger for BundleSenderTrigger { async fn wait_for_trigger( &mut self, ) -> anyhow::Result>> { @@ -905,6 +887,60 @@ impl BundleSenderTrigger { Ok(self.last_block.clone()) } + fn last_block(&self) -> &NewHead { + &self.last_block + } +} + +impl BundleSenderTrigger { + async fn new( + pool_client: &P, + bundle_action_receiver: 
mpsc::Receiver, + timer_interval: Duration, + ) -> anyhow::Result { + let block_rx = Self::start_block_stream(pool_client).await?; + + Ok(Self { + bundling_mode: BundlingMode::Auto, + block_rx, + bundle_action_receiver, + timer: tokio::time::interval(timer_interval), + last_block: NewHead { + block_hash: H256::zero(), + block_number: 0, + }, + }) + } + + async fn start_block_stream( + pool_client: &P, + ) -> anyhow::Result> { + let Ok(mut new_heads) = pool_client.subscribe_new_heads().await else { + error!("Failed to subscribe to new blocks"); + bail!("failed to subscribe to new blocks"); + }; + + let (tx, rx) = mpsc::unbounded_channel(); + tokio::spawn(async move { + loop { + match new_heads.next().await { + Some(b) => { + if tx.send(b).is_err() { + error!("Failed to buffer new block for bundle sender"); + return; + } + } + None => { + error!("Block stream ended"); + return; + } + } + } + }); + + Ok(rx) + } + fn consume_blocks(&mut self) -> anyhow::Result<()> { // Consume any other blocks that may have been buffered up loop { @@ -922,10 +958,6 @@ impl BundleSenderTrigger { } } } - - fn last_block(&self) -> &NewHead { - &self.last_block - } } #[derive(Debug, Clone)] @@ -1005,3 +1037,530 @@ impl BuilderMetrics { metrics::counter!("builder_state_machine_errors", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } } + +#[cfg(test)] +mod tests { + use ethers::types::Bytes; + use mockall::Sequence; + use rundler_provider::MockEntryPointV0_6; + use rundler_types::{ + chain::ChainSpec, pool::MockPool, v0_6::UserOperation, UserOpsPerAggregator, + }; + use tokio::sync::{broadcast, mpsc}; + + use super::*; + use crate::{ + bundle_proposer::{Bundle, MockBundleProposer}, + bundle_sender::{BundleSenderImpl, MockTrigger}, + transaction_tracker::MockTransactionTracker, + }; + + #[tokio::test] + async fn test_empty_send() { + let Mocks { + mut mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + // block 0 + add_trigger_no_update_last_block( + &mut mock_trigger, + &mut mock_tracker, + &mut Sequence::new(), + 0, + ); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // empty bundle + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(Bundle::::default()) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in building state + let mut state = SenderMachineState::new(mock_trigger, mock_tracker); + + sender.step_state(&mut state).await.unwrap(); + + // empty bundle shouldn't move out of building state + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: true, + .. 
+ }) + )); + } + + #[tokio::test] + async fn test_send() { + let Mocks { + mut mock_proposer, + mut mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + // block 0 + add_trigger_no_update_last_block( + &mut mock_trigger, + &mut mock_tracker, + &mut Sequence::new(), + 0, + ); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // bundle with one op + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(bundle()) })); + + // should create the bundle txn + mock_entry_point + .expect_get_send_bundle_transaction() + .returning(|_, _, _, _| TypedTransaction::default()); + + // should send the bundle txn + mock_tracker + .expect_send_transaction() + .returning(|_, _| Box::pin(async { Ok(H256::zero()) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in building state + let mut state = SenderMachineState::new(mock_trigger, mock_tracker); + + sender.step_state(&mut state).await.unwrap(); + + // end in the pending state + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { + until: 3, // block 0 + wait 3 blocks + .. + }) + )); + } + + #[tokio::test] + async fn test_wait_for_mine_success() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, 1); + mock_trigger + .expect_wait_for_block() + .once() + .in_sequence(&mut seq) + .returning(|| { + Box::pin(async { + Ok(NewHead { + block_number: 2, + block_hash: H256::zero(), + }) + }) + }); + // no call to last_block after mine + + let mut seq = Sequence::new(); + mock_tracker + .expect_check_for_update() + .once() + .in_sequence(&mut seq) + .returning(|| Box::pin(async { Ok(None) })); + mock_tracker + .expect_check_for_update() + .once() + .in_sequence(&mut seq) + .returning(|| { + Box::pin(async { + Ok(Some(TrackerUpdate::Mined { + block_number: 2, + nonce: U256::zero(), + gas_limit: None, + gas_used: None, + tx_hash: H256::zero(), + attempt_number: 0, + })) + }) + }); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in pending state + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Pending(PendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + // first step has no update + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { until: 3, .. 
}) + )); + + // second step is mined and moves back to building + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + }) + )); + } + + #[tokio::test] + async fn test_wait_for_mine_timed_out() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + for i in 1..=3 { + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, i); + } + + mock_tracker + .expect_check_for_update() + .times(3) + .returning(|| Box::pin(async { Ok(None) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in pending state + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Pending(PendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + // first and second step has no update + for _ in 0..2 { + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { until: 3, .. }) + )); + } + + // third step times out and moves back to building with a fee increase + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: false, + fee_increase_count: 1, + }) + )); + } + + #[tokio::test] + async fn test_send_cancel() { + let Mocks { + mut mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + mock_proposer + .expect_estimate_gas_fees() + .once() + .returning(|_| Box::pin(async { Ok((GasFees::default(), U256::zero())) })); + + mock_tracker + .expect_cancel_transaction() + .once() + .returning(|_, _| Box::pin(async { Ok(Some(H256::zero())) })); + + mock_trigger.expect_last_block().return_const(NewHead { + block_number: 0, + block_hash: H256::zero(), + }); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Cancelling(CancellingState { + fee_increase_count: 0, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }) + )); + } + + #[tokio::test] + async fn test_resubmit_cancel() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + for i in 1..=3 { + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, i); + } + + mock_tracker + .expect_check_for_update() + .times(3) + .returning(|| Box::pin(async { Ok(None) })); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + for _ in 0..2 { + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }) + )); + } + + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + 
InnerState::Cancelling(CancellingState { + fee_increase_count: 1, + }) + )); + } + + #[tokio::test] + async fn test_condition_not_met() { + let Mocks { + mut mock_proposer, + mut mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + add_trigger_no_update_last_block(&mut mock_trigger, &mut mock_tracker, &mut seq, 1); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // bundle with one op + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(bundle()) })); + + // should create the bundle txn + mock_entry_point + .expect_get_send_bundle_transaction() + .returning(|_, _, _, _| TypedTransaction::default()); + + // should send the bundle txn, returns condition not met + mock_tracker + .expect_send_transaction() + .returning(|_, _| Box::pin(async { Err(TransactionTrackerError::ConditionNotMet) })); + + // should notify proposer that condition was not met + mock_proposer + .expect_notify_condition_not_met() + .times(1) + .return_const(()); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + sender.step_state(&mut state).await.unwrap(); + + // end back in the building state without waiting for trigger + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: false, + fee_increase_count: 0, + }) + )); + } + + struct Mocks { + mock_proposer: MockBundleProposer, + mock_entry_point: MockEntryPointV0_6, + mock_tracker: MockTransactionTracker, + mock_trigger: MockTrigger, + } + + fn new_mocks() -> Mocks { + let mut mock_entry_point = MockEntryPointV0_6::new(); + mock_entry_point + .expect_address() + .return_const(Address::default()); + + Mocks { + mock_proposer: MockBundleProposer::new(), + mock_entry_point, + mock_tracker: MockTransactionTracker::new(), + mock_trigger: MockTrigger::new(), + } + } + + fn new_sender( + mock_proposer: MockBundleProposer, + mock_entry_point: MockEntryPointV0_6, + ) -> BundleSenderImpl< + UserOperation, + MockBundleProposer, + MockEntryPointV0_6, + MockTransactionTracker, + MockPool, + > { + BundleSenderImpl::new( + 0, + mpsc::channel(1000).1, + ChainSpec::default(), + Address::default(), + mock_proposer, + mock_entry_point, + MockTransactionTracker::new(), + MockPool::new(), + Settings { + max_fee_increases: 3, + max_blocks_to_wait_for_mine: 3, + }, + broadcast::channel(1000).0, + ) + } + + fn add_trigger_no_update_last_block( + mock_trigger: &mut MockTrigger, + mock_tracker: &mut MockTransactionTracker, + seq: &mut Sequence, + block_number: u64, + ) { + mock_trigger + .expect_wait_for_trigger() + .once() + .in_sequence(seq) + .returning(move || Box::pin(async move { Ok(None) })); + mock_tracker + .expect_check_for_update() + .returning(|| Box::pin(async { Ok(None) })); + mock_trigger + .expect_last_block() + .once() + .in_sequence(seq) + .return_const(NewHead { + block_number, + block_hash: H256::zero(), + }); + } + + fn add_trigger_wait_for_block_last_block( + mock_trigger: &mut MockTrigger, + seq: &mut Sequence, + block_number: u64, + ) { + mock_trigger + .expect_wait_for_block() + .once() + .in_sequence(seq) + .returning(move || { + Box::pin(async move { + Ok(NewHead { + block_number, + 
block_hash: H256::zero(), + }) + }) + }); + mock_trigger + .expect_last_block() + .once() + .in_sequence(seq) + .return_const(NewHead { + block_number, + block_hash: H256::zero(), + }); + } + + fn bundle() -> Bundle { + Bundle { + gas_estimate: U256::from(100_000), + gas_fees: GasFees::default(), + expected_storage: Default::default(), + rejected_ops: vec![], + entity_updates: vec![], + ops_per_aggregator: vec![UserOpsPerAggregator { + aggregator: Address::zero(), + signature: Bytes::new(), + user_ops: vec![UserOperation::default()], + }], + } + } +} diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index cc0fff660..4514951f6 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -16,6 +16,8 @@ use std::sync::Arc; use anyhow::{bail, Context}; use async_trait::async_trait; use ethers::types::{transaction::eip2718::TypedTransaction, Address, H256, U256}; +#[cfg(test)] +use mockall::automock; use rundler_provider::Provider; use rundler_sim::ExpectedStorage; use rundler_types::GasFees; @@ -33,6 +35,7 @@ use crate::sender::{TransactionSender, TxSenderError, TxStatus}; /// succeeded (potentially not the most recent one) or whether circumstances /// have changed so that it is worth making another attempt. #[async_trait] +#[cfg_attr(test, automock)] pub(crate) trait TransactionTracker: Send + Sync + 'static { /// Returns the current nonce and the required fees for the next transaction. fn get_nonce_and_required_fees(&self) -> TransactionTrackerResult<(U256, Option)>; From e6c9f150dba9cf45dd33e5edbd1736519255df58 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Mon, 24 Jun 2024 14:54:28 -0500 Subject: [PATCH 11/17] feat(sim): check total gas limit after estimation --- bin/rundler/src/cli/mod.rs | 1 + crates/rpc/src/eth/error.rs | 3 ++ crates/sim/src/estimation/mod.rs | 5 ++ crates/sim/src/estimation/v0_6.rs | 59 +++++++++++++++++++++ crates/sim/src/estimation/v0_7.rs | 85 ++++++++++++++++++++++++++++--- 5 files changed, 147 insertions(+), 6 deletions(-) diff --git a/bin/rundler/src/cli/mod.rs b/bin/rundler/src/cli/mod.rs index 0669c3a56..02ea790be 100644 --- a/bin/rundler/src/cli/mod.rs +++ b/bin/rundler/src/cli/mod.rs @@ -344,6 +344,7 @@ impl TryFrom<&CommonArgs> for EstimationSettings { max_call_gas, max_paymaster_verification_gas: value.max_verification_gas, max_paymaster_post_op_gas: max_call_gas, + max_total_execution_gas: value.max_bundle_gas, max_simulate_handle_ops_gas: value.max_simulate_handle_ops_gas, verification_estimation_gas_fee: value.verification_estimation_gas_fee, }) diff --git a/crates/rpc/src/eth/error.rs b/crates/rpc/src/eth/error.rs index 9bce5e653..0c58697a7 100644 --- a/crates/rpc/src/eth/error.rs +++ b/crates/rpc/src/eth/error.rs @@ -457,6 +457,9 @@ impl From for EthRpcError { error @ GasEstimationError::GasUsedTooLarge => { Self::EntryPointValidationRejected(error.to_string()) } + error @ GasEstimationError::GasTotalTooLarge(_, _) => { + Self::InvalidParams(error.to_string()) + } error @ GasEstimationError::GasFieldTooLarge(_, _) => { Self::InvalidParams(error.to_string()) } diff --git a/crates/sim/src/estimation/mod.rs b/crates/sim/src/estimation/mod.rs index 1a7bdf8fa..e8ba83089 100644 --- a/crates/sim/src/estimation/mod.rs +++ b/crates/sim/src/estimation/mod.rs @@ -54,6 +54,9 @@ pub enum GasEstimationError { /// Supplied gas was too large #[error("{0} cannot be larger than {1}")] GasFieldTooLarge(&'static str, u64), + /// The total amount of gas used by the UO is greater 
than allowed + #[error("total gas used by the user operation {0} is greater than the allowed limit: {1}")] + GasTotalTooLarge(u64, u64), /// Other error #[error(transparent)] Other(#[from] anyhow::Error), @@ -86,6 +89,8 @@ pub struct Settings { pub max_paymaster_verification_gas: u64, /// The maximum amount of gas that can be used for the paymaster post op step of a user operation pub max_paymaster_post_op_gas: u64, + /// The maximum amount of total execution gas to check after estimation + pub max_total_execution_gas: u64, /// The maximum amount of gas that can be used in a call to `simulateHandleOps` pub max_simulate_handle_ops_gas: u64, /// The gas fee to use during verification gas estimation, required to be held by the fee-payer diff --git a/crates/sim/src/estimation/v0_6.rs b/crates/sim/src/estimation/v0_6.rs index 56a7fd76c..9fc1a23d8 100644 --- a/crates/sim/src/estimation/v0_6.rs +++ b/crates/sim/src/estimation/v0_6.rs @@ -103,6 +103,19 @@ where let verification_gas_limit = verification_gas_limit?; let call_gas_limit = call_gas_limit?; + // Verify total gas limit + let mut op_with_gas = full_op; + op_with_gas.verification_gas_limit = verification_gas_limit; + op_with_gas.call_gas_limit = call_gas_limit; + let gas_limit = + gas::user_operation_execution_gas_limit(&self.chain_spec, &op_with_gas, true); + if gas_limit > self.settings.max_total_execution_gas.into() { + return Err(GasEstimationError::GasTotalTooLarge( + gas_limit.as_u64(), + self.settings.max_total_execution_gas, + )); + } + Ok(GasEstimate { pre_verification_gas, verification_gas_limit, @@ -524,6 +537,7 @@ mod tests { max_call_gas: TEST_MAX_GAS_LIMITS, max_paymaster_verification_gas: TEST_MAX_GAS_LIMITS, max_paymaster_post_op_gas: TEST_MAX_GAS_LIMITS, + max_total_execution_gas: TEST_MAX_GAS_LIMITS, max_simulate_handle_ops_gas: TEST_MAX_GAS_LIMITS, verification_estimation_gas_fee: 1_000_000_000_000, }; @@ -617,6 +631,7 @@ mod tests { max_call_gas: 10000000000, max_paymaster_verification_gas: 10000000000, max_paymaster_post_op_gas: 10000000000, + max_total_execution_gas: 10000000000, max_simulate_handle_ops_gas: 100000000, verification_estimation_gas_fee: 1_000_000_000_000, }; @@ -684,6 +699,7 @@ mod tests { max_call_gas: 10000000000, max_paymaster_verification_gas: 10000000000, max_paymaster_post_op_gas: 10000000000, + max_total_execution_gas: 10000000000, max_simulate_handle_ops_gas: 100000000, verification_estimation_gas_fee: 1_000_000_000_000, }; @@ -1266,6 +1282,7 @@ mod tests { max_call_gas: 10, max_paymaster_post_op_gas: 10, max_paymaster_verification_gas: 10, + max_total_execution_gas: 10, max_simulate_handle_ops_gas: 10, verification_estimation_gas_fee: 1_000_000_000_000, }; @@ -1428,6 +1445,48 @@ mod tests { )); } + #[tokio::test] + async fn test_total_limit() { + let (mut entry, mut provider) = create_base_config(); + + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); + + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: TestCallGasResult { + success: true, + gas_used: 0.into(), + revert_data: Bytes::new(), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.call_gas_limit = Some(TEST_MAX_GAS_LIMITS.into()); + optional_op.verification_gas_limit = Some(TEST_MAX_GAS_LIMITS.into()); + + let err = 
estimator + .estimate_op_gas(optional_op.clone(), spoof::state()) + .await + .err() + .unwrap(); + + assert!(matches!( + err, + GasEstimationError::GasTotalTooLarge(_, TEST_MAX_GAS_LIMITS) + )) + } + #[test] fn test_proxy_target_offset() { let proxy_target_bytes = hex::decode(PROXY_IMPLEMENTATION_ADDRESS_MARKER).unwrap(); diff --git a/crates/sim/src/estimation/v0_7.rs b/crates/sim/src/estimation/v0_7.rs index c4ebb0541..e021d1527 100644 --- a/crates/sim/src/estimation/v0_7.rs +++ b/crates/sim/src/estimation/v0_7.rs @@ -110,17 +110,32 @@ where ); tracing::debug!("gas estimation took {}ms", timer.elapsed().as_millis()); - let verification_gas_limit = verification_gas_limit?.into(); - let paymaster_verification_gas_limit = paymaster_verification_gas_limit?.into(); - let call_gas_limit = call_gas_limit?.into(); + let verification_gas_limit = verification_gas_limit?; + let paymaster_verification_gas_limit = paymaster_verification_gas_limit?; + let call_gas_limit = call_gas_limit?; + + // check the total gas limit + let mut op_with_gas = full_op; + op_with_gas.pre_verification_gas = pre_verification_gas; + op_with_gas.call_gas_limit = call_gas_limit; + op_with_gas.verification_gas_limit = verification_gas_limit; + op_with_gas.paymaster_verification_gas_limit = paymaster_verification_gas_limit; + let gas_limit = + gas::user_operation_execution_gas_limit(&self.chain_spec, &op_with_gas, true); + if gas_limit > self.settings.max_total_execution_gas.into() { + return Err(GasEstimationError::GasTotalTooLarge( + gas_limit.as_u64(), + self.settings.max_total_execution_gas, + )); + } Ok(GasEstimate { pre_verification_gas, - call_gas_limit, - verification_gas_limit, + call_gas_limit: call_gas_limit.into(), + verification_gas_limit: verification_gas_limit.into(), paymaster_verification_gas_limit: op .paymaster - .map(|_| paymaster_verification_gas_limit), + .map(|_| paymaster_verification_gas_limit.into()), }) } } @@ -563,6 +578,7 @@ mod tests { max_call_gas: TEST_MAX_GAS_LIMITS, max_paymaster_verification_gas: TEST_MAX_GAS_LIMITS, max_paymaster_post_op_gas: TEST_MAX_GAS_LIMITS, + max_total_execution_gas: TEST_MAX_GAS_LIMITS, max_simulate_handle_ops_gas: TEST_MAX_GAS_LIMITS, verification_estimation_gas_fee: 1_000_000_000_000, }; @@ -799,6 +815,63 @@ mod tests { )); } + #[tokio::test] + async fn test_total_limit() { + let (mut entry, mut provider) = create_base_config(); + + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: TestCallGasResult { + success: true, + gas_used: TEST_MAX_GAS_LIMITS.into(), + revert_data: Bytes::new(), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); + + let (estimator, _) = create_estimator(entry, provider); + + let optional_op = UserOperationOptionalGas { + sender: Address::zero(), + nonce: U256::zero(), + call_data: Bytes::new(), + call_gas_limit: Some(TEST_MAX_GAS_LIMITS.into()), + verification_gas_limit: Some(TEST_MAX_GAS_LIMITS.into()), + pre_verification_gas: Some(TEST_MAX_GAS_LIMITS.into()), + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + signature: Bytes::new(), + + paymaster: None, + paymaster_data: Bytes::new(), + paymaster_verification_gas_limit: Some(TEST_MAX_GAS_LIMITS.into()), + paymaster_post_op_gas_limit: Some(TEST_MAX_GAS_LIMITS.into()), + + factory: None, + factory_data: Bytes::new(), + }; + + let estimation = estimator + 
.estimate_op_gas(optional_op, spoof::state()) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasTotalTooLarge(_, TEST_MAX_GAS_LIMITS) + )); + } + #[test] fn test_proxy_target_offset() { let proxy_target_bytes = hex::decode(PROXY_IMPLEMENTATION_ADDRESS_MARKER).unwrap(); From ac741409cc221c96e3b7941c260599159a635073 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Mon, 24 Jun 2024 12:47:19 -0500 Subject: [PATCH 12/17] fix(builder): fix builder state machine replacement tracking --- bin/rundler/src/cli/builder.rs | 26 ++- crates/builder/src/bundle_proposer.rs | 49 +++-- crates/builder/src/bundle_sender.rs | 256 +++++++++++++++++++--- crates/builder/src/task.rs | 9 +- crates/builder/src/transaction_tracker.rs | 89 ++++++-- crates/pool/src/mempool/pool.rs | 56 ++--- docs/architecture/builder.md | 74 ++++++- docs/cli.md | 6 +- 8 files changed, 436 insertions(+), 129 deletions(-) diff --git a/bin/rundler/src/cli/builder.rs b/bin/rundler/src/cli/builder.rs index 3ad87f04e..333c86dbe 100644 --- a/bin/rundler/src/cli/builder.rs +++ b/bin/rundler/src/cli/builder.rs @@ -226,16 +226,25 @@ pub struct BuilderArgs { )] replacement_fee_percent_increase: u64, - /// Maximum number of times to increase gas fees when retrying a transaction + /// Maximum number of times to increase gas fees when retrying a cancellation transaction /// before giving up. #[arg( - long = "builder.max_fee_increases", - name = "builder.max_fee_increases", - env = "BUILDER_MAX_FEE_INCREASES", - // Seven increases of 10% is roughly 2x the initial fees. - default_value = "7" + long = "builder.max_cancellation_fee_increases", + name = "builder.max_cancellation_fee_increases", + env = "BUILDER_MAX_CANCELLATION_FEE_INCREASES", + default_value = "15" )] - max_fee_increases: u64, + max_cancellation_fee_increases: u64, + + /// The maximum number of blocks to wait in a replacement underpriced state before issuing + /// a cancellation transaction. + #[arg( + long = "builder.max_replacement_underpriced_blocks", + name = "builder.max_replacement_underpriced_blocks", + env = "BUILDER_MAX_REPLACEMENT_UNDERPRICED_BLOCKS", + default_value = "20" + )] + max_replacement_underpriced_blocks: u64, /// The index offset to apply to the builder index #[arg( @@ -350,7 +359,8 @@ impl BuilderArgs { sim_settings: common.try_into()?, max_blocks_to_wait_for_mine: self.max_blocks_to_wait_for_mine, replacement_fee_percent_increase: self.replacement_fee_percent_increase, - max_fee_increases: self.max_fee_increases, + max_cancellation_fee_increases: self.max_cancellation_fee_increases, + max_replacement_underpriced_blocks: self.max_replacement_underpriced_blocks, remote_address, }) } diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 17999cfcc..ea4def832 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -104,18 +104,35 @@ pub(crate) trait BundleProposer: Send + Sync + 'static { &mut self, min_fees: Option, is_replacement: bool, - ) -> anyhow::Result>; + ) -> BundleProposerResult>; /// Gets the current gas fees /// /// If `min_fees` is `Some`, the proposer will ensure the gas fees returned are at least `min_fees`. 
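+ // Illustrative handling of the proposer result type (a sketch only, not part of the trait):
+ //   match proposer.make_bundle(None, false).await {
+ //       Ok(bundle) => { /* submit the bundle */ }
+ //       Err(BundleProposerError::NoOperationsInitially) => { /* pool is empty, wait for a trigger */ }
+ //       Err(BundleProposerError::NoOperationsAfterFeeFilter) => { /* ops exist but are underpriced */ }
+ //       Err(e) => { /* hard failure */ }
+ //   }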
- async fn estimate_gas_fees(&self, min_fees: Option) - -> anyhow::Result<(GasFees, U256)>; + async fn estimate_gas_fees( + &self, + min_fees: Option, + ) -> BundleProposerResult<(GasFees, U256)>; /// Notifies the proposer that a condition was not met during the last bundle proposal fn notify_condition_not_met(&mut self); } +pub(crate) type BundleProposerResult = std::result::Result; + +#[derive(Debug, thiserror::Error)] +pub(crate) enum BundleProposerError { + #[error("No operations initially")] + NoOperationsInitially, + #[error("No operations after fee filtering")] + NoOperationsAfterFeeFilter, + #[error(transparent)] + ProviderError(#[from] rundler_provider::ProviderError), + /// All other errors + #[error(transparent)] + Other(#[from] anyhow::Error), +} + #[derive(Debug)] pub(crate) struct BundleProposerImpl { builder_index: u64, @@ -155,8 +172,11 @@ where async fn estimate_gas_fees( &self, required_fees: Option, - ) -> anyhow::Result<(GasFees, U256)> { - self.fee_estimator.required_bundle_fees(required_fees).await + ) -> BundleProposerResult<(GasFees, U256)> { + Ok(self + .fee_estimator + .required_bundle_fees(required_fees) + .await?) } fn notify_condition_not_met(&mut self) { @@ -167,16 +187,16 @@ where &mut self, required_fees: Option, is_replacement: bool, - ) -> anyhow::Result> { + ) -> BundleProposerResult> { let (ops, (block_hash, _), (bundle_fees, base_fee)) = try_join!( self.get_ops_from_pool(), self.provider .get_latest_block_hash_and_number() - .map_err(anyhow::Error::from), + .map_err(BundleProposerError::from), self.estimate_gas_fees(required_fees) )?; if ops.is_empty() { - return Ok(Bundle::default()); + return Err(BundleProposerError::NoOperationsInitially); } tracing::debug!("Starting bundle proposal with {} ops", ops.len()); @@ -206,7 +226,7 @@ where tracing::debug!("Bundle proposal after fee limit had {} ops", ops.len()); if ops.is_empty() { - return Ok(Bundle::default()); + return Err(BundleProposerError::NoOperationsAfterFeeFilter); } // (2) Limit the amount of operations for simulation @@ -686,7 +706,7 @@ where async fn estimate_gas_rejecting_failed_ops( &self, context: &mut ProposalContext, - ) -> anyhow::Result> { + ) -> BundleProposerResult> { // sum up the gas needed for all the ops in the bundle // and apply an overhead multiplier let gas = math::increase_by_percent( @@ -731,7 +751,7 @@ where } } - async fn get_ops_from_pool(&self) -> anyhow::Result> { + async fn get_ops_from_pool(&self) -> BundleProposerResult> { // Use builder's index as the shard index to ensure that two builders don't // attempt to bundle the same operations. 
// @@ -754,7 +774,7 @@ where &self, addresses: impl IntoIterator, block_hash: H256, - ) -> anyhow::Result> { + ) -> BundleProposerResult> { let futures = addresses.into_iter().map(|address| async move { let deposit = self .entry_point @@ -1294,8 +1314,9 @@ impl ProposalContext { } SimulationViolation::UnintendedRevertWithMessage(entity_type, message, address) => { match &message[..4] { - // do not penalize an entity for invalid account nonces, which can occur without malicious intent from the sender - "AA25" => {} + // do not penalize an entity for invalid account nonces or already deployed senders, + // which can occur without malicious intent from the sender or factory + "AA10" | "AA25" => {} _ => { if let Some(entity_address) = address { self.add_entity_update( diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index 4c088dbb0..e3e77a319 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -25,7 +25,7 @@ use rundler_types::{ builder::BundlingMode, chain::ChainSpec, pool::{NewHead, Pool}, - EntityUpdate, GasFees, UserOperation, + EntityUpdate, UserOperation, }; use rundler_utils::emit::WithEntryPoint; use tokio::{ @@ -35,7 +35,7 @@ use tokio::{ use tracing::{debug, error, info, instrument, warn}; use crate::{ - bundle_proposer::BundleProposer, + bundle_proposer::{Bundle, BundleProposer, BundleProposerError}, emit::{BuilderEvent, BundleTxDetails}, transaction_tracker::{TrackerUpdate, TransactionTracker, TransactionTrackerError}, }; @@ -47,7 +47,8 @@ pub(crate) trait BundleSender: Send + Sync + 'static { #[derive(Debug)] pub(crate) struct Settings { - pub(crate) max_fee_increases: u64, + pub(crate) max_replacement_underpriced_blocks: u64, + pub(crate) max_cancellation_fee_increases: u64, pub(crate) max_blocks_to_wait_for_mine: u64, } @@ -102,8 +103,12 @@ pub enum SendBundleResult { enum SendBundleAttemptResult { // The bundle was successfully sent Success, - // The bundle was empty - NoOperations, + // There are no operations available to bundle + NoOperationsInitially, + // There were no operations after the fee was increased + NoOperationsAfterFeeFilter, + // There were no operations after the bundle was simulated + NoOperationsAfterSimulation, // Replacement Underpriced ReplacementUnderpriced, // Condition not met @@ -234,11 +239,31 @@ where block_number + self.settings.max_blocks_to_wait_for_mine, ))); } - Ok(SendBundleAttemptResult::NoOperations) => { - debug!("No operations to bundle"); - if inner.fee_increase_count > 0 { + Ok(SendBundleAttemptResult::NoOperationsInitially) => { + debug!("No operations available initially"); + state.complete(Some(SendBundleResult::NoOperationsInitially)); + } + Ok(SendBundleAttemptResult::NoOperationsAfterSimulation) => { + debug!("No operations available after simulation"); + state.complete(Some(SendBundleResult::NoOperationsInitially)); + } + Ok(SendBundleAttemptResult::NoOperationsAfterFeeFilter) => { + debug!("No operations to bundle after fee filtering"); + if let Some(underpriced_info) = inner.underpriced_info { + // If we are here, there are UOs in the pool that may be correctly priced, but are being blocked by an underpriced replacement + // after a fee increase. If we repeatedly get into this state, initiate a cancellation. + if block_number - underpriced_info.since_block + >= self.settings.max_replacement_underpriced_blocks + { + warn!("No operations available, but last replacement underpriced, moving to cancelling state. Round: {}. Since block {}. 
Current block {}. Max underpriced blocks: {}", underpriced_info.rounds, underpriced_info.since_block, block_number, self.settings.max_replacement_underpriced_blocks); + state.update(InnerState::Cancelling(inner.to_cancelling())); + } else { + info!("No operations available, but last replacement underpriced, starting over and waiting for next trigger. Round: {}. Since block {}. Current block {}", underpriced_info.rounds, underpriced_info.since_block, block_number); + state.update_and_reset(InnerState::Building(inner.underpriced_round())); + } + } else if inner.fee_increase_count > 0 { warn!( - "Abandoning bundle after fee increases {}, no operations available", + "Abandoning bundle after {} fee increases, no operations available after fee increase", inner.fee_increase_count ); self.metrics.increment_bundle_txns_abandoned(); @@ -247,7 +272,7 @@ where // If the node we are using still has the transaction in the mempool, its // possible we will get a `ReplacementUnderpriced` on the next iteration // and will start a cancellation. - state.reset(); + state.abandon(); } else { debug!("No operations available, waiting for next trigger"); state.complete(Some(SendBundleResult::NoOperationsInitially)); @@ -259,13 +284,15 @@ where state.reset(); } Ok(SendBundleAttemptResult::ReplacementUnderpriced) => { - info!("Replacement transaction underpriced, entering cancellation loop"); - state.update(InnerState::Cancelling(inner.to_cancelling())); + info!("Replacement transaction underpriced, marking as underpriced. Num fee increases {:?}", inner.fee_increase_count); + state.update(InnerState::Building( + inner.replacement_underpriced(block_number), + )); } Ok(SendBundleAttemptResult::ConditionNotMet) => { info!("Condition not met, notifying proposer and starting new bundle attempt"); self.proposer.notify_condition_not_met(); - state.reset(); + state.update(InnerState::Building(inner.retry())); } Err(error) => { error!("Bundle send error {error:?}"); @@ -352,7 +379,10 @@ where state: &mut SenderMachineState, inner: CancellingState, ) -> anyhow::Result<()> { - info!("Cancelling last transaction"); + info!( + "Cancelling last transaction, attempt {}", + inner.fee_increase_count + ); let (estimated_fees, _) = self .proposer @@ -381,7 +411,19 @@ where } Err(TransactionTrackerError::ReplacementUnderpriced) => { info!("Replacement transaction underpriced during cancellation, trying again"); - state.update(InnerState::Cancelling(inner.to_self())); + if inner.fee_increase_count >= self.settings.max_cancellation_fee_increases { + // abandon the cancellation + warn!("Abandoning cancellation after max fee increases {}, starting new bundle attempt", inner.fee_increase_count); + self.metrics.increment_cancellations_abandoned(); + state.reset(); + } else { + // Increase fees again + info!( + "Cancellation increasing fees, attempt: {}", + inner.fee_increase_count + 1 + ); + state.update(InnerState::Cancelling(inner.to_self())); + } } Err(TransactionTrackerError::NonceTooLow) => { // reset the transaction tracker and try again @@ -407,10 +449,19 @@ where // check for transaction update if let Some(update) = tracker_update { match update { - TrackerUpdate::Mined { .. } => { + TrackerUpdate::Mined { + gas_used, + gas_price, + .. + } => { // mined - info!("Cancellation transaction mined"); + let fee = gas_used.zip(gas_price).map(|(used, price)| used * price); + info!("Cancellation transaction mined. 
Price (wei) {fee:?}"); self.metrics.increment_cancellation_txns_mined(); + if let Some(fee) = fee { + self.metrics + .increment_cancellation_txns_total_fee(fee.as_u64()); + }; } TrackerUpdate::LatestTxDropped { .. } => { // If a cancellation gets dropped, move to bundling state as there is no @@ -425,9 +476,10 @@ where } state.reset(); } else if state.block_number() >= inner.until { - if inner.fee_increase_count >= self.settings.max_fee_increases { + if inner.fee_increase_count >= self.settings.max_cancellation_fee_increases { // abandon the cancellation warn!("Abandoning cancellation after max fee increases {}, starting new bundle attempt", inner.fee_increase_count); + self.metrics.increment_cancellations_abandoned(); state.reset(); } else { // start replacement, don't wait for trigger @@ -456,10 +508,22 @@ where ) -> anyhow::Result { let (nonce, required_fees) = state.transaction_tracker.get_nonce_and_required_fees()?; - let Some(bundle_tx) = self - .get_bundle_tx(nonce, required_fees, fee_increase_count > 0) - .await? - else { + let bundle = match self + .proposer + .make_bundle(required_fees, fee_increase_count > 0) + .await + { + Ok(bundle) => bundle, + Err(BundleProposerError::NoOperationsInitially) => { + return Ok(SendBundleAttemptResult::NoOperationsInitially); + } + Err(BundleProposerError::NoOperationsAfterFeeFilter) => { + return Ok(SendBundleAttemptResult::NoOperationsAfterFeeFilter); + } + Err(e) => bail!("Failed to make bundle: {e:?}"), + }; + + let Some(bundle_tx) = self.get_bundle_tx(nonce, bundle).await? else { self.emit(BuilderEvent::formed_bundle( self.builder_index, None, @@ -467,7 +531,7 @@ where fee_increase_count, required_fees, )); - return Ok(SendBundleAttemptResult::NoOperations); + return Ok(SendBundleAttemptResult::NoOperationsAfterSimulation); }; let BundleTx { tx, @@ -525,15 +589,8 @@ where async fn get_bundle_tx( &mut self, nonce: U256, - required_fees: Option, - is_replacement: bool, + bundle: Bundle, ) -> anyhow::Result> { - let bundle = self - .proposer - .make_bundle(required_fees, is_replacement) - .await - .context("proposer should create bundle for builder")?; - let remove_ops_future = async { if bundle.rejected_ops.is_empty() { return; @@ -649,10 +706,21 @@ impl SenderMachineState { let building_state = BuildingState { wait_for_trigger: false, fee_increase_count: 0, + underpriced_info: None, }; self.inner = InnerState::Building(building_state); } + fn update_and_reset(&mut self, inner: InnerState) { + self.update(inner); + self.requires_reset = true; + } + + fn abandon(&mut self) { + self.transaction_tracker.abandon(); + self.inner = InnerState::new(); + } + fn complete(&mut self, result: Option) { if let Some(result) = result { if let Some(r) = self.send_bundle_response.take() { @@ -715,6 +783,7 @@ impl InnerState { InnerState::Building(BuildingState { wait_for_trigger: true, fee_increase_count: 0, + underpriced_info: None, }) } } @@ -723,9 +792,17 @@ impl InnerState { struct BuildingState { wait_for_trigger: bool, fee_increase_count: u64, + underpriced_info: Option, +} + +#[derive(Debug, Clone, Copy)] +struct UnderpricedInfo { + since_block: u64, + rounds: u64, } impl BuildingState { + // Transition to pending state fn to_pending(self, until: u64) -> PendingState { PendingState { until, @@ -733,11 +810,56 @@ impl BuildingState { } } + // Transition to cancelling state fn to_cancelling(self) -> CancellingState { CancellingState { fee_increase_count: 0, } } + + // Retry the build + fn retry(mut self) -> Self { + self.wait_for_trigger = false; + self 
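        // Illustrative flow of the underpriced meta-state handled by the helpers below
        // (comment-only sketch, grounded in the send-attempt handling earlier in this file):
        //   1. A send attempt returns ReplacementUnderpriced -> replacement_underpriced(block):
        //      bump fee_increase_count, remember since_block, and retry without waiting for a trigger.
        //   2. The higher fees price out every UO -> NoOperationsAfterFeeFilter -> underpriced_round():
        //      reset the fee increases, increment rounds, and wait for the next trigger.
        //   3. Once block_number - since_block >= max_replacement_underpriced_blocks, the sender
        //      moves to the Cancelling state to clear the blocking transaction.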
+ } + + // Mark a replacement as underpriced + // + // The next state will NOT wait for a trigger. Use this when fees should be increased and a new bundler + // should be attempted immediately. + fn replacement_underpriced(self, block_number: u64) -> Self { + let ui = if let Some(underpriced_info) = self.underpriced_info { + underpriced_info + } else { + UnderpricedInfo { + since_block: block_number, + rounds: 1, + } + }; + + BuildingState { + wait_for_trigger: false, + fee_increase_count: self.fee_increase_count + 1, + underpriced_info: Some(ui), + } + } + + // Finalize an underpriced round. + // + // This will clear out the number of fee increases and increment the number of underpriced rounds. + // Use this when we are in an underpriced state, but there are no longer any UOs available to bundle. + fn underpriced_round(self) -> Self { + let mut underpriced_info = self + .underpriced_info + .expect("underpriced_info must be Some when calling underpriced_round"); + underpriced_info.rounds += 1; + + BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + underpriced_info: Some(underpriced_info), + } + } } #[derive(Debug, Clone, Copy)] @@ -751,6 +873,7 @@ impl PendingState { BuildingState { wait_for_trigger: false, fee_increase_count: self.fee_increase_count + 1, + underpriced_info: None, } } } @@ -1025,6 +1148,14 @@ impl BuilderMetrics { metrics::counter!("builder_cancellation_txns_mined", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } + fn increment_cancellation_txns_total_fee(&self, fee: u64) { + metrics::counter!("builder_cancellation_txns_total_fee", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(fee); + } + + fn increment_cancellations_abandoned(&self) { + metrics::counter!("builder_cancellations_abandoned", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + fn increment_soft_cancellations(&self) { metrics::counter!("builder_soft_cancellations", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); } @@ -1044,7 +1175,7 @@ mod tests { use mockall::Sequence; use rundler_provider::MockEntryPointV0_6; use rundler_types::{ - chain::ChainSpec, pool::MockPool, v0_6::UserOperation, UserOpsPerAggregator, + chain::ChainSpec, pool::MockPool, v0_6::UserOperation, GasFees, UserOpsPerAggregator, }; use tokio::sync::{broadcast, mpsc}; @@ -1197,6 +1328,7 @@ mod tests { nonce: U256::zero(), gas_limit: None, gas_used: None, + gas_price: None, tx_hash: H256::zero(), attempt_number: 0, })) @@ -1231,6 +1363,7 @@ mod tests { InnerState::Building(BuildingState { wait_for_trigger: true, fee_increase_count: 0, + underpriced_info: None, }) )); } @@ -1284,6 +1417,60 @@ mod tests { InnerState::Building(BuildingState { wait_for_trigger: false, fee_increase_count: 1, + underpriced_info: None, + }) + )); + } + + #[tokio::test] + async fn test_transition_to_cancel() { + let Mocks { + mut mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + add_trigger_no_update_last_block(&mut mock_trigger, &mut mock_tracker, &mut seq, 3); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // fee filter error + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| { + Box::pin(async { 
Err(BundleProposerError::NoOperationsAfterFeeFilter) }) + }); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in underpriced meta-state + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + underpriced_info: Some(UnderpricedInfo { + since_block: 0, + rounds: 1, + }), + }), + requires_reset: false, + }; + + // step state, block number should trigger move to cancellation + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Cancelling(CancellingState { + fee_increase_count: 0, }) )); } @@ -1432,6 +1619,7 @@ mod tests { inner: InnerState::Building(BuildingState { wait_for_trigger: true, fee_increase_count: 0, + underpriced_info: None, }), requires_reset: false, }; @@ -1446,6 +1634,7 @@ mod tests { InnerState::Building(BuildingState { wait_for_trigger: false, fee_increase_count: 0, + underpriced_info: None, }) )); } @@ -1491,8 +1680,9 @@ mod tests { MockTransactionTracker::new(), MockPool::new(), Settings { - max_fee_increases: 3, + max_cancellation_fee_increases: 3, max_blocks_to_wait_for_mine: 3, + max_replacement_underpriced_blocks: 3, }, broadcast::channel(1000).0, ) diff --git a/crates/builder/src/task.rs b/crates/builder/src/task.rs index 933cc35a0..6d4ffe5eb 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -91,8 +91,10 @@ pub struct Args { pub max_blocks_to_wait_for_mine: u64, /// Percentage to increase the fees by when replacing a bundle transaction pub replacement_fee_percent_increase: u64, - /// Maximum number of times to increase the fees when replacing a bundle transaction - pub max_fee_increases: u64, + /// Maximum number of times to increase the fee when cancelling a transaction + pub max_cancellation_fee_increases: u64, + /// Maximum amount of blocks to spend in a replacement underpriced state before moving to cancel + pub max_replacement_underpriced_blocks: u64, /// Address to bind the remote builder server to, if any. If none, no server is starter. pub remote_address: Option, /// Entry points to start builders for @@ -456,7 +458,8 @@ where .await?; let builder_settings = bundle_sender::Settings { - max_fee_increases: self.args.max_fee_increases, + max_replacement_underpriced_blocks: self.args.max_replacement_underpriced_blocks, + max_cancellation_fee_increases: self.args.max_cancellation_fee_increases, max_blocks_to_wait_for_mine: self.args.max_blocks_to_wait_for_mine, }; diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index 4514951f6..fcc5a2c3a 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -50,7 +50,7 @@ pub(crate) trait TransactionTracker: Send + Sync + 'static { expected_stroage: &ExpectedStorage, ) -> TransactionTrackerResult; - /// Cancel the latest transaction in the tracker. + /// Cancel the abandoned transaction in the tracker. /// /// Returns: An option containing the hash of the transaction that was used to cancel. If the option /// is empty, then either no transaction was cancelled or the cancellation was a "soft-cancel." 
@@ -71,6 +71,9 @@ pub(crate) trait TransactionTracker: Send + Sync + 'static { /// Resets the tracker to its initial state async fn reset(&mut self); + + /// Abandons the current transaction + fn abandon(&mut self); } /// Errors that can occur while using a `TransactionTracker`. @@ -99,6 +102,7 @@ pub(crate) enum TrackerUpdate { attempt_number: u64, gas_limit: Option, gas_used: Option, + gas_price: Option, }, LatestTxDropped { nonce: U256, @@ -121,6 +125,7 @@ where nonce: U256, transactions: Vec, has_dropped: bool, + has_abandoned: bool, attempt_count: u64, } @@ -159,6 +164,7 @@ where nonce, transactions: vec![], has_dropped: false, + has_abandoned: false, attempt_count: 0, }) } @@ -168,6 +174,7 @@ where self.transactions.clear(); self.has_dropped = false; self.attempt_count = 0; + self.has_abandoned = false; self.update_metrics(); } @@ -214,7 +221,7 @@ where async fn get_mined_tx_gas_info( &self, tx_hash: H256, - ) -> anyhow::Result<(Option, Option)> { + ) -> anyhow::Result<(Option, Option, Option)> { let (tx, tx_receipt) = tokio::try_join!( self.provider.get_transaction(tx_hash), self.provider.get_transaction_receipt(tx_hash), @@ -223,14 +230,14 @@ where warn!("failed to fetch transaction data for tx: {}", tx_hash); None }); - let gas_used = match tx_receipt { - Some(r) => r.gas_used, + let (gas_used, gas_price) = match tx_receipt { + Some(r) => (r.gas_used, r.effective_gas_price), None => { warn!("failed to fetch transaction receipt for tx: {}", tx_hash); - None + (None, None) } }; - Ok((gas_limit, gas_used)) + Ok((gas_limit, gas_used, gas_price)) } } @@ -241,7 +248,7 @@ where T: TransactionSender, { fn get_nonce_and_required_fees(&self) -> TransactionTrackerResult<(U256, Option)> { - let gas_fees = if self.has_dropped { + let gas_fees = if self.has_dropped || self.has_abandoned { None } else { self.transactions.last().map(|tx| { @@ -259,20 +266,45 @@ where ) -> TransactionTrackerResult { self.validate_transaction(&tx)?; let gas_fees = GasFees::from(&tx); - let sent_tx = self.sender.send_transaction(tx, expected_storage).await?; info!( - "Sent transaction {:?} nonce: {:?}", - sent_tx.tx_hash, sent_tx.nonce + "Sending transaction with nonce: {:?} gas fees: {:?}", + self.nonce, gas_fees ); - self.transactions.push(PendingTransaction { - tx_hash: sent_tx.tx_hash, - gas_fees, - attempt_number: self.attempt_count, - }); - self.has_dropped = false; - self.attempt_count += 1; - self.update_metrics(); - Ok(sent_tx.tx_hash) + let sent_tx = self.sender.send_transaction(tx, expected_storage).await; + + match sent_tx { + Ok(sent_tx) => { + info!( + "Sent transaction {:?} nonce: {:?}", + sent_tx.tx_hash, sent_tx.nonce + ); + self.transactions.push(PendingTransaction { + tx_hash: sent_tx.tx_hash, + gas_fees, + attempt_number: self.attempt_count, + }); + self.has_dropped = false; + self.has_abandoned = false; + self.attempt_count += 1; + self.update_metrics(); + Ok(sent_tx.tx_hash) + } + Err(e) if matches!(e, TxSenderError::ReplacementUnderpriced) => { + info!("Replacement underpriced: nonce: {:?}", self.nonce); + // still store this as a pending transaction so that we can continue to increase fees. 
+ self.transactions.push(PendingTransaction { + tx_hash: H256::zero(), + gas_fees, + attempt_number: self.attempt_count, + }); + self.has_dropped = false; + self.has_abandoned = false; + self.attempt_count += 1; + self.update_metrics(); + Err(e.into()) + } + Err(e) => Err(e.into()), + } } async fn cancel_transaction( @@ -309,7 +341,10 @@ where return Ok(None); } - info!("Sent cancellation tx {:?}", cancel_info.tx_hash); + info!( + "Sent cancellation tx {:?} fees: {:?}", + cancel_info.tx_hash, gas_fees + ); self.transactions.push(PendingTransaction { tx_hash: cancel_info.tx_hash, @@ -342,7 +377,8 @@ where .context("tracker should check transaction status when the nonce changes")?; info!("Status of tx {:?}: {:?}", tx.tx_hash, status); if let TxStatus::Mined { block_number } = status { - let (gas_limit, gas_used) = self.get_mined_tx_gas_info(tx.tx_hash).await?; + let (gas_limit, gas_used, gas_price) = + self.get_mined_tx_gas_info(tx.tx_hash).await?; out = TrackerUpdate::Mined { tx_hash: tx.tx_hash, nonce: self.nonce, @@ -350,6 +386,7 @@ where attempt_number: tx.attempt_number, gas_limit, gas_used, + gas_price, }; break; } @@ -378,7 +415,8 @@ where TxStatus::Mined { block_number } => { let nonce = self.nonce; self.set_nonce_and_clear_state(nonce + 1); - let (gas_limit, gas_used) = self.get_mined_tx_gas_info(last_tx.tx_hash).await?; + let (gas_limit, gas_used, gas_price) = + self.get_mined_tx_gas_info(last_tx.tx_hash).await?; Some(TrackerUpdate::Mined { tx_hash: last_tx.tx_hash, nonce, @@ -386,6 +424,7 @@ where attempt_number: last_tx.attempt_number, gas_limit, gas_used, + gas_price, }) } TxStatus::Dropped => { @@ -399,6 +438,12 @@ where let nonce = self.get_external_nonce().await.unwrap_or(self.nonce); self.set_nonce_and_clear_state(nonce); } + + fn abandon(&mut self) { + self.has_abandoned = true; + self.attempt_count = 0; + // remember the transaction in case we need to cancel it + } } impl From for TransactionTrackerError { diff --git a/crates/pool/src/mempool/pool.rs b/crates/pool/src/mempool/pool.rs index 817cd9afe..a5f2eb022 100644 --- a/crates/pool/src/mempool/pool.rs +++ b/crates/pool/src/mempool/pool.rs @@ -28,7 +28,7 @@ use rundler_types::{ Entity, EntityType, GasFees, Timestamp, UserOperation, UserOperationId, UserOperationVariant, }; use rundler_utils::math; -use tracing::info; +use tracing::{info, warn}; use super::{entity_tracker::EntityCounter, size::SizeTracker, MempoolResult, PoolConfig}; use crate::chain::MinedOp; @@ -67,6 +67,8 @@ pub(crate) struct PoolInner { by_id: HashMap, /// Best operations, sorted by gas price best: BTreeSet, + /// Time to mine info + time_to_mine: HashMap, /// Removed operations, temporarily kept around in case their blocks are /// reorged away. Stored along with the block number at which it was /// removed. 
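// Sketch of the new time-to-mine bookkeeping introduced by this change (comment-only):
// the per-op TimeToMineInfo moves off OrderedPoolOperation into a map keyed by op hash:
//   - when an op is added:    time_to_mine.insert(hash, TimeToMineInfo::new())
//   - on each block update:   time_to_mine.get_mut(hash).increase(delta_time, delta_height)
//                             for ops whose gas price still makes them candidates
//   - when an op is mined:    time_to_mine.remove(hash), then record the metric
//   - when an op is removed:  time_to_mine.remove(hash)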
@@ -95,6 +97,7 @@ impl PoolInner { by_hash: HashMap::new(), by_id: HashMap::new(), best: BTreeSet::new(), + time_to_mine: HashMap::new(), mined_at_block_number_by_hash: HashMap::new(), mined_hashes_with_block_numbers: BTreeSet::new(), count_by_address: HashMap::new(), @@ -172,6 +175,7 @@ impl PoolInner { let block_delta_time = sys_block_time - self.prev_sys_block_time; let block_delta_height = block_number - self.prev_block_number; + let candidate_gas_price = base_fee + candidate_gas_fees.max_priority_fee_per_gas; let mut expired = Vec::new(); let mut num_candidates = 0; @@ -180,12 +184,15 @@ impl PoolInner { expired.push((*hash, op.po.valid_time_range.valid_until)); } - num_candidates += if op.update_time_to_mine( - block_delta_time, - block_delta_height, - candidate_gas_fees, - base_fee, - ) { + let uo_gas_price = cmp::min( + op.uo().max_fee_per_gas(), + op.uo().max_priority_fee_per_gas() + base_fee, + ); + + num_candidates += if uo_gas_price >= candidate_gas_price { + if let Some(ttm) = self.time_to_mine.get_mut(hash) { + ttm.increase(block_delta_time, block_delta_height); + } 1 } else { 0 @@ -282,7 +289,11 @@ impl PoolInner { block_number: u64, ) -> Option> { let tx_in_pool = self.by_id.get(&mined_op.id())?; - PoolMetrics::record_time_to_mine(&tx_in_pool.time_to_mine, mined_op.entry_point); + if let Some(time_to_mine) = self.time_to_mine.remove(&mined_op.hash) { + PoolMetrics::record_time_to_mine(&time_to_mine, mined_op.entry_point); + } else { + warn!("Could not find time to mine for {:?}", mined_op.hash); + } let hash = tx_in_pool .uo() @@ -420,7 +431,6 @@ impl PoolInner { let pool_op = OrderedPoolOperation { po: op, submission_id: submission_id.unwrap_or_else(|| self.next_submission_id()), - time_to_mine: TimeToMineInfo::new(), }; // update counts @@ -439,6 +449,7 @@ impl PoolInner { self.by_hash.insert(hash, pool_op.clone()); self.by_id.insert(pool_op.uo().id(), pool_op.clone()); self.best.insert(pool_op); + self.time_to_mine.insert(hash, TimeToMineInfo::new()); // TODO(danc): This silently drops UOs from the pool without reporting let removed = self @@ -461,6 +472,7 @@ impl PoolInner { let id = &op.po.uo.id(); self.by_id.remove(id); self.best.remove(&op); + self.time_to_mine.remove(&hash); if let Some(block_number) = block_number { self.cache_size += op.mem_size(); @@ -525,7 +537,6 @@ impl PoolInner { struct OrderedPoolOperation { po: Arc, submission_id: u64, - time_to_mine: TimeToMineInfo, } impl OrderedPoolOperation { @@ -536,28 +547,6 @@ impl OrderedPoolOperation { fn mem_size(&self) -> usize { std::mem::size_of::() + self.po.mem_size() } - - fn update_time_to_mine( - &mut self, - block_delta_time: Duration, - block_delta_height: u64, - candidate_gas_fees: GasFees, - base_fee: U256, - ) -> bool { - let candidate_gas_price = base_fee + candidate_gas_fees.max_priority_fee_per_gas; - let uo_gas_price = cmp::min( - self.uo().max_fee_per_gas(), - self.uo().max_priority_fee_per_gas() + base_fee, - ); - - if uo_gas_price >= candidate_gas_price { - self.time_to_mine - .increase(block_delta_time, block_delta_height); - true - } else { - false - } - } } impl Eq for OrderedPoolOperation {} @@ -1012,7 +1001,6 @@ mod tests { OrderedPoolOperation { po: Arc::new(po1), submission_id: 0, - time_to_mine: TimeToMineInfo::new() } .mem_size() ); @@ -1053,7 +1041,6 @@ mod tests { OrderedPoolOperation { po: Arc::new(po2), submission_id: 0, - time_to_mine: TimeToMineInfo::new(), } .mem_size() ); @@ -1129,7 +1116,6 @@ mod tests { OrderedPoolOperation { po: Arc::new(create_op(Address::random(), 1, 1)), 
submission_id: 1, - time_to_mine: TimeToMineInfo::new(), } .mem_size() } diff --git a/docs/architecture/builder.md b/docs/architecture/builder.md index 1ac0df6db..767770880 100644 --- a/docs/architecture/builder.md +++ b/docs/architecture/builder.md @@ -59,25 +59,14 @@ When using AWS KMS for signing Rundler requires the use of Redis to perform key To ensure that no two signers in a bundler system attempt to use the same key, causing nonce collisions, this key leasing system is used to lease a key in a CLI configured list to a single signer at a time. ## Transaction Senders - The builder supports multiple sender implementations to support bundle transaction submission to different types of APIs. -- **Raw**: Send the bundle as an `eth_sendRawTransaction` via a standard ETH JSON-RPC. - -- **Conditional**: Send the bundle as an `eth_sendRawTransactionConditional` to an interface that supports the [conditional transaction RPC](https://notes.ethereum.org/@yoav/SkaX2lS9j). +- **Raw**: Send the bundle as an `eth_sendRawTransaction` via a standard ETH JSON-RPC. If conditional RPC is enabled it will send the bundle as an `eth_sendRawTransactionConditional` to an interface that supports the [conditional transaction RPC](https://notes.ethereum.org/@yoav/SkaX2lS9j). - **Flashbots**: Submit bundles via the [Flashbots Protect](https://docs.flashbots.net/) RPC endpoint, only supported on Ethereum Mainnet. - **Bloxroute**: Submit bundles via Bloxroute's [Polygon Private Transaction](https://docs.bloxroute.com/apis/frontrunning-protection/polygon_private_tx) endpoint. Only supported on polygon. -## Transaction Tracking - -After the bundle transaction is sent, the sender tracks its status via the transaction tracker module. This module tracks to see if a transaction is pending, dropped, or mined. - -If after a configured amount of blocks the transaction is still pending, the sender will attempt to re-estimate gas fees and will submit a new bundle that replaces the old bundle. - -If dropped or mined, the sender will restart the process. - ## N-Senders Rundler has the ability to run N bundle sender state machines in parallel, each configured with their own distinct signer/account for bundle submission. @@ -85,3 +74,64 @@ Rundler has the ability to run N bundle sender state machines in parallel, each In order for bundle proposers to avoid attempting to bundle the same UO, the sender is configured with a mempool shard index that is added to the request to the pool. This shard index is used by the pool to always return a disjoint set of UOs to each sender. N-senders can be useful to increase bundler gas throughput. + +## Sender State Machine + +The bundle sender is implemented as an finite state machine to continuously submit bundle transactions onchain. The state machine runs as long as the builder process is running. + +### States + +**`Building`** + +In the building state the sender is waiting for a trigger. Once triggered, the sender will query the mempool for available user operations. Those user operations are then filtered by the current fees, total gas limit, and simulation results. If before/after the filtering there are no candidate user operations, the sender will wait for another trigger. If there are candidate user operations, a bundle transaction is submitted. If a cancellation is required, the sender will transfer to the cancelling state. + +**`Pending`** + +In the pending state the builder is waiting for a bundle transaction to be mined. 
It will wait in this state for up to `max_blocks_to_wait_for_mine` blocks. If mined, dropped, or timed out (abandoned) the sender will transition back to the building state with the appropriate metadata captured. + +**`Cancelling`** + +In the cancelling state the builder creates a cancellation operation. The shape of this operation depends on the type of transaction sender being used. If a "hard" cancellation operation is submitted the sender will submit a cancellation transaction and transition to the cancel pending state. If a "soft" cancellation operation is submitted it will transition back to the building state immediately. + +**`CancelPending`** + +In the cancel pending state the builder is waiting for a cancellation transaction to be mined. It will wait in this state for up to `max_blocks_to_wait_for_mine` blocks. If mined, the sender will transition back to the building state. If dropped or timed out (abandoned), the sender will transition back to the cancelling state. If the sender has already performed `max_cancellation_fee_increases`, and the transaction has been abandoned, it will transition back to the building state and reset internal state. + +### Triggers + +While in the building state the sender is waiting for a trigger. There are 3 types of triggers: + +* New block (building mode: auto): Trigger bundle building when a new block is mined. +* Time (building mode: auto): Trigger bundle building after `bundle_max_send_interval_millis` (chain spec) has elapsed without a bundle attempt. +* Manual call (building mode: manual): Trigger bundle building on a call to `debug_bundler_sendBundleNow`. + +### Cancellations + +Cancellations occur in a specific scenario: there are user operations available that pay more than the estimated gas price, but when the sender submits the bundle transaction it receives a "replacement underpriced" error. If after increasing the fee the user operations are priced out, we are in an "underpriced" meta-state. + +The first time the sender encounters this state it will capture the block number and attempt to create another bundle, resetting the fees. During subsequent encounters the builder will compare that block number to latest, if the difference is more than `max_replacement_underpriced_blocks`, the builder will move to a cancellation state. + +The goal of the cancellation state is to remove the pending transaction from the mempool that is blocking the bundle submission, and to do so while spending the least amount of gas. There are two types of cancellations: "hard" and "soft." A "hard" cancellation requires a transaction to be sent onchain. This is typically an empty transaction to minimize costs. A "soft" cancellation does not require a transaction and is simply an RPC interaction. 
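The block-count check that flips the sender from this underpriced meta-state into cancellation is small. A simplified sketch is shown below; the free-standing helper is illustrative only, the real check lives inline in `bundle_sender.rs`:

```rust
// Illustrative helper: true once a replacement has been underpriced for too many blocks
// and the sender should cancel instead of starting another underpriced round.
fn should_cancel(since_block: u64, current_block: u64, max_replacement_underpriced_blocks: u64) -> bool {
    current_block - since_block >= max_replacement_underpriced_blocks
}
```

With the documented default of 20 blocks, a replacement that has been underpriced since block 100 triggers a cancellation at block 120.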
+ +### Diagram + +```mermaid +--- +title: Bundle Sender State Machine (Simplified) +--- +stateDiagram-v2 + Building: Building + Pending + Cancelling + CancelPending + + [*] --> Building + Building --> Building : No operations + Building --> Pending : Bundle submitted + Pending --> Building : Bundle mined/dropped/abandoned + Building --> Cancelling : Cancel triggered + Cancelling --> CancelPending: Hard cancellation submitted + Cancelling --> Building : Soft cancellation completed + CancelPending --> Cancelling: Cancellation dropped/abandoned + CancelPending --> Building: Cancellation mined/aborted +``` diff --git a/docs/cli.md b/docs/cli.md index ff26b0216..05d9f056a 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -187,8 +187,10 @@ List of command line options for configuring the Builder. - env: *BUILDER_MAX_BLOCKS_TO_WAIT_FOR_MINE* - `--builder.replacement_fee_percent_increase`: Percentage amount to increase gas fees when retrying a transaction after it failed to mine (default: `10`) - env: *BUILDER_REPLACEMENT_FEE_PERCENT_INCREASE* -- `--builder.max_fee_increases`: Maximum number of fee increases to attempt (Seven increases of 10% is roughly 2x the initial fees) (default: `7`) - - env: *BUILDER_MAX_FEE_INCREASES* +- `--builder.max_cancellation_fee_increases`: Maximum number of cancellation fee increases to attempt (default: `15`) + - env: *BUILDER_MAX_CANCELLATION_FEE_INCREASES* +- `--builder.max_replacement_underpriced_blocks`: The maximum number of blocks to wait in a replacement underpriced state before issuing a cancellation transaction (default: `20`) + - env: *BUILDER_MAX_REPLACEMENT_UNDERPRICED_BLOCKS* - `--builder.sender`: Choice of what sender type to use for transaction submission. (default: `raw`, options: `raw`, `flashbots`, `polygon_bloxroute`) - env: *BUILDER_SENDER* - `--builder.submit_url`: Only used if builder.sender == "raw." If present, the URL of the ETH provider that will be used to send transactions. Defaults to the value of `node_http`. 
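Taken together, these flags populate the sender settings introduced earlier in this patch. A rough sketch using the documented defaults; the `max_blocks_to_wait_for_mine` value below is illustrative only:

```rust
// Sketch: mapping the documented CLI defaults onto the bundle sender settings.
let builder_settings = bundle_sender::Settings {
    max_replacement_underpriced_blocks: 20, // --builder.max_replacement_underpriced_blocks
    max_cancellation_fee_increases: 15,     // --builder.max_cancellation_fee_increases
    max_blocks_to_wait_for_mine: 2,         // --builder.max_blocks_to_wait_for_mine (illustrative value)
};
```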
From 3ffd8755ce794293a8c48298b45d53b95a014bd8 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Tue, 25 Jun 2024 13:20:38 -0500 Subject: [PATCH 13/17] fix(rpc): fix error message for unstaked entity mempool count --- crates/pool/proto/op_pool/op_pool.proto | 2 +- crates/pool/src/mempool/uo_pool.rs | 4 ++-- crates/pool/src/server/remote/error.rs | 6 +++--- crates/rpc/src/eth/error.rs | 4 ++-- crates/types/src/pool/error.rs | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/pool/proto/op_pool/op_pool.proto b/crates/pool/proto/op_pool/op_pool.proto index d5cd68805..0b1e6ab65 100644 --- a/crates/pool/proto/op_pool/op_pool.proto +++ b/crates/pool/proto/op_pool/op_pool.proto @@ -511,7 +511,7 @@ message PaymasterBalanceTooLow { message MaxOperationsReachedError { uint64 num_ops = 1; - bytes entity_address = 2; + Entity entity = 2; } message EntityThrottledError { diff --git a/crates/pool/src/mempool/uo_pool.rs b/crates/pool/src/mempool/uo_pool.rs index 925de0bef..2805def3a 100644 --- a/crates/pool/src/mempool/uo_pool.rs +++ b/crates/pool/src/mempool/uo_pool.rs @@ -477,7 +477,7 @@ where { return Err(MempoolError::MaxOperationsReached( self.config.same_sender_mempool_count, - pool_op.uo.sender(), + Entity::account(pool_op.uo.sender()), )); } @@ -490,7 +490,7 @@ where if state.pool.address_count(&entity.address) >= ops_allowed as usize { return Err(MempoolError::MaxOperationsReached( ops_allowed as usize, - entity.address, + entity, )); } } diff --git a/crates/pool/src/server/remote/error.rs b/crates/pool/src/server/remote/error.rs index 99a2286ec..33ae98209 100644 --- a/crates/pool/src/server/remote/error.rs +++ b/crates/pool/src/server/remote/error.rs @@ -83,7 +83,7 @@ impl TryFrom for MempoolError { Some(mempool_error::Error::MaxOperationsReached(e)) => { MempoolError::MaxOperationsReached( e.num_ops as usize, - from_bytes(&e.entity_address)?, + (&e.entity.context("should have entity in error")?).try_into()?, ) } Some(mempool_error::Error::EntityThrottled(e)) => MempoolError::EntityThrottled( @@ -168,11 +168,11 @@ impl From for ProtoMempoolError { }, )), }, - MempoolError::MaxOperationsReached(ops, addr) => ProtoMempoolError { + MempoolError::MaxOperationsReached(ops, entity) => ProtoMempoolError { error: Some(mempool_error::Error::MaxOperationsReached( MaxOperationsReachedError { num_ops: ops as u64, - entity_address: addr.to_proto_bytes(), + entity: Some((&entity).into()), }, )), }, diff --git a/crates/rpc/src/eth/error.rs b/crates/rpc/src/eth/error.rs index 0c58697a7..b180ea3f5 100644 --- a/crates/rpc/src/eth/error.rs +++ b/crates/rpc/src/eth/error.rs @@ -93,8 +93,8 @@ pub enum EthRpcError { #[error("operation is out of time range")] OutOfTimeRange(OutOfTimeRangeData), /// Max operations reached for this sender - #[error("Max operations ({0}) reached for sender {1:#032x} due to being unstaked")] - MaxOperationsReached(usize, Address), + #[error("Max operations ({0}) reached for {1} due to being unstaked")] + MaxOperationsReached(usize, Entity), /// Entity throttled or banned #[error("{} {:#032x} throttled or banned", .0.kind, .0.address)] ThrottledOrBanned(Entity), diff --git a/crates/types/src/pool/error.rs b/crates/types/src/pool/error.rs index 752030d98..e81603251 100644 --- a/crates/types/src/pool/error.rs +++ b/crates/types/src/pool/error.rs @@ -56,7 +56,7 @@ pub enum MempoolError { ReplacementUnderpriced(U256, U256), /// Max operations reached for unstaked sender [UREP-010] or unstaked non-sender entity [UREP-020] #[error("Max operations ({0}) reached for entity 
{1}")] - MaxOperationsReached(usize, Address), + MaxOperationsReached(usize, Entity), /// Multiple roles violation /// Spec rule: STO-040 #[error("A {} at {} in this UserOperation is used as a sender entity in another UserOperation currently in mempool.", .0.kind, .0.address)] From cfb8a15585595d67975c54147a6cbb9da50a85b2 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Tue, 25 Jun 2024 14:05:41 -0500 Subject: [PATCH 14/17] chore: fix docs and set v0.3 version number --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- bin/rundler/src/cli/builder.rs | 5 ++--- docs/cli.md | 8 ++++---- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 041db3126..43ad4276d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4026,7 +4026,7 @@ dependencies = [ [[package]] name = "rundler" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "clap", @@ -4065,7 +4065,7 @@ dependencies = [ [[package]] name = "rundler-builder" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", @@ -4106,7 +4106,7 @@ dependencies = [ [[package]] name = "rundler-dev" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "ethers", @@ -4115,7 +4115,7 @@ dependencies = [ [[package]] name = "rundler-pool" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "async-stream", @@ -4149,7 +4149,7 @@ dependencies = [ [[package]] name = "rundler-provider" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", @@ -4170,7 +4170,7 @@ dependencies = [ [[package]] name = "rundler-rpc" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", @@ -4197,7 +4197,7 @@ dependencies = [ [[package]] name = "rundler-sim" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "arrayvec", @@ -4224,7 +4224,7 @@ dependencies = [ [[package]] name = "rundler-task" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", @@ -4244,7 +4244,7 @@ dependencies = [ [[package]] name = "rundler-tools" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "clap", @@ -4261,7 +4261,7 @@ dependencies = [ [[package]] name = "rundler-types" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", @@ -4284,7 +4284,7 @@ dependencies = [ [[package]] name = "rundler-utils" -version = "0.2.2" +version = "0.3.0" dependencies = [ "anyhow", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index f10095bab..4d58c48cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ default-members = ["bin/rundler"] resolver = "2" [workspace.package] -version = "0.2.2" +version = "0.3.0" edition = "2021" rust-version = "1.75" license = "MIT OR Apache-2.0" diff --git a/bin/rundler/src/cli/builder.rs b/bin/rundler/src/cli/builder.rs index 333c86dbe..006907c61 100644 --- a/bin/rundler/src/cli/builder.rs +++ b/bin/rundler/src/cli/builder.rs @@ -70,7 +70,7 @@ pub struct BuilderArgs { /// Private keys to use for signing transactions /// - /// Cannot use both `builder.private_key` and `builder.aws_kms_key_ids` at the same time. + /// Cannot use both `builder.private_keys` and `builder.aws_kms_key_ids` at the same time. #[arg( long = "builder.private_keys", name = "builder.private_keys", @@ -138,8 +138,7 @@ pub struct BuilderArgs { )] pub submit_url: Option, - /// If present, the url of the ETH provider that will be used to check - /// transaction status. Else will use the node http for status. + /// If true, use the submit endpoint for transaction status checks. 
/// /// Only used when BUILDER_SENDER is "raw" #[arg( diff --git a/docs/cli.md b/docs/cli.md index 05d9f056a..194f76985 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -227,11 +227,11 @@ Here are some example commands to use the CLI: ```sh # Run the Node subcommand with custom options -$ ./rundler node --chain_id 1337 --max_verification_gas 10000000 --disable_entry_point_v0_6 +$ ./rundler node --network dev --disable_entry_point_v0_6 --node_http http://localhost:8545 --builder.private_keys 0x0000000000000000000000000000000000000000000000000000000000000001 -# Run the RPC subcommand with custom options and enable JSON logging. The builder and pool will need to be running before this starts. -$ ./rundler rpc --node_http http://localhost:8545 --log.json --disable_entry_point_v0_6 +# Run the RPC subcommand with custom options and enable JSON logging. The builder (localhost:50052) and pool (localhost:50051) will need to be running before this starts. +$ ./rundler rpc --network dev --node_http http://localhost:8545 --log.json --disable_entry_point_v0_6 # Run the Pool subcommand with custom options and specify a mempool config file -$ ./rundler pool --max_simulate_handle_ops_gas 15000000 --mempool_config_path mempool.json --node_http http://localhost:8545 --chain_id 8453 --disable_entry_point_v0_6 +$ ./target/debug/rundler pool --network dev --max_simulate_handle_ops_gas 15000000 --mempool_config_path mempool.json --node_http http://localhost:8545 --disable_entry_point_v0_6 ``` From dec7547e652845a77c913d8272078f44485564b6 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Wed, 26 Jun 2024 16:16:51 -0500 Subject: [PATCH 15/17] fix(pool): fix crash when block height goes backwards --- crates/builder/src/transaction_tracker.rs | 6 ++++-- crates/pool/src/mempool/pool.rs | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index fcc5a2c3a..582ab6f69 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -267,8 +267,10 @@ where self.validate_transaction(&tx)?; let gas_fees = GasFees::from(&tx); info!( - "Sending transaction with nonce: {:?} gas fees: {:?}", - self.nonce, gas_fees + "Sending transaction with nonce: {:?} gas fees: {:?} gas limit: {:?}", + self.nonce, + gas_fees, + tx.gas() ); let sent_tx = self.sender.send_transaction(tx, expected_storage).await; diff --git a/crates/pool/src/mempool/pool.rs b/crates/pool/src/mempool/pool.rs index a5f2eb022..e3a0f0595 100644 --- a/crates/pool/src/mempool/pool.rs +++ b/crates/pool/src/mempool/pool.rs @@ -173,8 +173,8 @@ impl PoolInner { .duration_since(UNIX_EPOCH) .expect("time should be after epoch"); - let block_delta_time = sys_block_time - self.prev_sys_block_time; - let block_delta_height = block_number - self.prev_block_number; + let block_delta_time = sys_block_time.saturating_sub(self.prev_sys_block_time); + let block_delta_height = block_number.saturating_sub(self.prev_block_number); let candidate_gas_price = base_fee + candidate_gas_fees.max_priority_fee_per_gas; let mut expired = Vec::new(); let mut num_candidates = 0; From e06ca61820e2dcc43c457347f386b8c96282f090 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Thu, 27 Jun 2024 08:02:14 -0500 Subject: [PATCH 16/17] chore(amoy): raise amoy chain config min fee to 30gwei --- bin/rundler/chain_specs/polygon_amoy.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/bin/rundler/chain_specs/polygon_amoy.toml b/bin/rundler/chain_specs/polygon_amoy.toml 
index 144ed230c..f56d019e3 100644 --- a/bin/rundler/chain_specs/polygon_amoy.toml +++ b/bin/rundler/chain_specs/polygon_amoy.toml @@ -2,5 +2,3 @@ base = "polygon" name = "Polygon Amoy" id = 80002 - -min_max_priority_fee_per_gas = "0x59682F00" # 1_500_000_000 From 355a1089fa2c2c224030e9282ce00ad748b0d9c3 Mon Sep 17 00:00:00 2001 From: dancoombs Date: Tue, 25 Jun 2024 14:05:41 -0500 Subject: [PATCH 17/17] chore: update to rust v1.79.0 --- .github/workflows/ci.yaml | 2 +- .github/workflows/docker-release.yaml | 2 +- Cargo.toml | 2 +- Dockerfile | 2 +- bin/rundler/src/cli/node/events.rs | 7 - crates/builder/src/sender/bloxroute.rs | 54 +------ crates/builder/src/sender/flashbots.rs | 140 +----------------- crates/builder/src/sender/mod.rs | 20 +-- crates/builder/src/sender/raw.rs | 10 +- crates/builder/src/task.rs | 1 - .../provider/src/ethers/metrics_middleware.rs | 12 +- 11 files changed, 24 insertions(+), 228 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 01b49772c..4f2caa9ce 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -17,7 +17,7 @@ jobs: uses: dtolnay/rust-toolchain@stable with: components: clippy - toolchain: 1.75.0 + toolchain: 1.79.0 - name: Install toolchain (nightly) run: rustup toolchain add nightly --component rustfmt --profile minimal - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/docker-release.yaml b/.github/workflows/docker-release.yaml index ddf8bb61d..99aed36d7 100644 --- a/.github/workflows/docker-release.yaml +++ b/.github/workflows/docker-release.yaml @@ -31,7 +31,7 @@ jobs: uses: dtolnay/rust-toolchain@stable with: components: clippy - toolchain: 1.75.0 + toolchain: 1.79.0 - name: Install toolchain (nightly) run: rustup toolchain add nightly --component rustfmt --profile minimal diff --git a/Cargo.toml b/Cargo.toml index 4d58c48cb..31d6f3244 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ resolver = "2" [workspace.package] version = "0.3.0" edition = "2021" -rust-version = "1.75" +rust-version = "1.79" license = "MIT OR Apache-2.0" repository = "https://github.com/alchemyplatform/rundler" diff --git a/Dockerfile b/Dockerfile index 095574321..f257e999f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # Adapted from https://github.com/paradigmxyz/reth/blob/main/Dockerfile # syntax=docker/dockerfile:1.4 -FROM --platform=$TARGETPLATFORM rust:1.75.0 AS chef-builder +FROM --platform=$TARGETPLATFORM rust:1.79.0 AS chef-builder # Install system dependencies RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list diff --git a/bin/rundler/src/cli/node/events.rs b/bin/rundler/src/cli/node/events.rs index 21af478f7..8066acd9d 100644 --- a/bin/rundler/src/cli/node/events.rs +++ b/bin/rundler/src/cli/node/events.rs @@ -13,7 +13,6 @@ use std::fmt::Display; -use ethers::types::Address; use rundler_builder::BuilderEvent; use rundler_pool::PoolEvent; @@ -23,12 +22,6 @@ pub enum Event { BuilderEvent(BuilderEvent), } -#[derive(Clone, Debug)] -pub struct WithEntryPoint { - pub entry_point: Address, - pub event: T, -} - impl From for Event { fn from(event: PoolEvent) -> Self { Self::PoolEvent(event) diff --git a/crates/builder/src/sender/bloxroute.rs b/crates/builder/src/sender/bloxroute.rs index 66a8807b3..7137a4c6b 100644 --- a/crates/builder/src/sender/bloxroute.rs +++ b/crates/builder/src/sender/bloxroute.rs @@ -11,16 +11,13 @@ // You should have received a copy of the GNU General 
Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use anyhow::Context; use ethers::{ middleware::SignerMiddleware, providers::{JsonRpcClient, Middleware, Provider}, - types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, TxHash, H256, - U256, - }, + types::{transaction::eip2718::TypedTransaction, Address, Bytes, TxHash, H256, U256}, utils::hex, }; use ethers_signers::Signer; @@ -32,7 +29,6 @@ use rundler_sim::ExpectedStorage; use rundler_types::GasFees; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue; -use tokio::time; use tonic::async_trait; use super::{ @@ -46,9 +42,7 @@ where S: Signer + 'static, { provider: SignerMiddleware>, S>, - raw_provider: Arc>, client: PolygonBloxrouteClient, - poll_interval: Duration, } #[async_trait] @@ -107,15 +101,6 @@ where .unwrap_or(TxStatus::Pending)) } - async fn wait_until_mined(&self, tx_hash: H256) -> Result> { - Ok(Self::wait_until_mined_no_drop( - tx_hash, - Arc::clone(&self.raw_provider), - self.poll_interval, - ) - .await?) - } - fn address(&self) -> Address { self.provider.address() } @@ -126,45 +111,12 @@ where C: JsonRpcClient + 'static, S: Signer + 'static, { - pub(crate) fn new( - provider: Arc>, - signer: S, - poll_interval: Duration, - auth_header: &str, - ) -> Result { + pub(crate) fn new(provider: Arc>, signer: S, auth_header: &str) -> Result { Ok(Self { provider: SignerMiddleware::new(Arc::clone(&provider), signer), - raw_provider: provider, client: PolygonBloxrouteClient::new(auth_header)?, - poll_interval, }) } - - async fn wait_until_mined_no_drop( - tx_hash: H256, - provider: Arc>, - poll_interval: Duration, - ) -> Result> { - loop { - let tx = provider - .get_transaction(tx_hash) - .await - .context("provider should return transaction status")?; - - match tx.and_then(|tx| tx.block_number) { - None => {} - Some(_) => { - let receipt = provider - .get_transaction_receipt(tx_hash) - .await - .context("provider should return transaction receipt")?; - return Ok(receipt); - } - } - - time::sleep(poll_interval).await; - } - } } struct PolygonBloxrouteClient { diff --git a/crates/builder/src/sender/flashbots.rs b/crates/builder/src/sender/flashbots.rs index eecaf1144..d1e44272f 100644 --- a/crates/builder/src/sender/flashbots.rs +++ b/crates/builder/src/sender/flashbots.rs @@ -13,27 +13,16 @@ // Adapted from https://github.com/onbjerg/ethers-flashbots and // https://github.com/gakonst/ethers-rs/blob/master/ethers-providers/src/toolbox/pending_transaction.rs -use std::{ - future::Future, - pin::Pin, - str::FromStr, - sync::Arc, - task::{Context as TaskContext, Poll}, -}; +use std::{str::FromStr, sync::Arc}; use anyhow::{anyhow, Context}; use ethers::{ middleware::SignerMiddleware, - providers::{interval, JsonRpcClient, Middleware, Provider}, - types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, H256, U256, U64, - }, + providers::{JsonRpcClient, Middleware, Provider}, + types::{transaction::eip2718::TypedTransaction, Address, Bytes, H256, U256, U64}, utils, }; use ethers_signers::Signer; -use futures_timer::Delay; -use futures_util::{Stream, StreamExt, TryFutureExt}; -use pin_project::pin_project; use reqwest::{ header::{HeaderMap, HeaderValue, CONTENT_TYPE}, Client, Response, @@ -131,17 +120,6 @@ where }) } - async fn wait_until_mined(&self, tx_hash: H256) -> Result> { - Ok( - PendingFlashbotsTransaction::new( - tx_hash, - self.provider.inner(), - 
&self.flashbots_client, - ) - .await?, - ) - } - fn address(&self) -> Address { self.provider.address() } @@ -384,118 +362,6 @@ where } } -type PinBoxFut<'a, T> = Pin> + Send + 'a>>; - -enum PendingFlashbotsTxState<'a> { - InitialDelay(Pin>), - PausedGettingTx, - GettingTx(PinBoxFut<'a, FlashbotsAPIResponse>), - PausedGettingReceipt, - GettingReceipt(PinBoxFut<'a, Option>), - Completed, -} - -#[pin_project] -struct PendingFlashbotsTransaction<'a, P, S> { - tx_hash: H256, - provider: &'a Provider

, - client: &'a FlashbotsClient, - state: PendingFlashbotsTxState<'a>, - interval: Box + Send + Unpin>, -} - -impl<'a, P: JsonRpcClient, S> PendingFlashbotsTransaction<'a, P, S> { - fn new(tx_hash: H256, provider: &'a Provider

, client: &'a FlashbotsClient) -> Self { - let delay = Box::pin(Delay::new(provider.get_interval())); - - Self { - tx_hash, - provider, - client, - state: PendingFlashbotsTxState::InitialDelay(delay), - interval: Box::new(interval(provider.get_interval())), - } - } -} - -impl<'a, P: JsonRpcClient, S: Send + Sync> Future for PendingFlashbotsTransaction<'a, P, S> { - type Output = anyhow::Result>; - - fn poll(self: Pin<&mut Self>, ctx: &mut TaskContext<'_>) -> Poll { - let this = self.project(); - - match this.state { - PendingFlashbotsTxState::InitialDelay(fut) => { - futures_util::ready!(fut.as_mut().poll(ctx)); - let status_fut = Box::pin(this.client.status(*this.tx_hash)); - *this.state = PendingFlashbotsTxState::GettingTx(status_fut); - ctx.waker().wake_by_ref(); - return Poll::Pending; - } - PendingFlashbotsTxState::PausedGettingTx => { - let _ready = futures_util::ready!(this.interval.poll_next_unpin(ctx)); - let status_fut = Box::pin(this.client.status(*this.tx_hash)); - *this.state = PendingFlashbotsTxState::GettingTx(status_fut); - ctx.waker().wake_by_ref(); - return Poll::Pending; - } - PendingFlashbotsTxState::GettingTx(fut) => { - let status = futures_util::ready!(fut.as_mut().poll(ctx))?; - tracing::debug!("Transaction:status {:?}:{:?}", *this.tx_hash, status.status); - match status.status { - FlashbotsAPITransactionStatus::Pending => { - *this.state = PendingFlashbotsTxState::PausedGettingTx; - ctx.waker().wake_by_ref(); - } - FlashbotsAPITransactionStatus::Included => { - let receipt_fut = Box::pin( - this.provider - .get_transaction_receipt(*this.tx_hash) - .map_err(|e| anyhow::anyhow!("failed to get receipt: {:?}", e)), - ); - *this.state = PendingFlashbotsTxState::GettingReceipt(receipt_fut); - ctx.waker().wake_by_ref(); - } - FlashbotsAPITransactionStatus::Cancelled => { - return Poll::Ready(Ok(None)); - } - FlashbotsAPITransactionStatus::Failed - | FlashbotsAPITransactionStatus::Unknown => { - return Poll::Ready(Err(anyhow::anyhow!( - "transaction failed with status {:?}", - status.status - ))); - } - } - } - PendingFlashbotsTxState::PausedGettingReceipt => { - let _ready = futures_util::ready!(this.interval.poll_next_unpin(ctx)); - let fut = Box::pin( - this.provider - .get_transaction_receipt(*this.tx_hash) - .map_err(|e| anyhow::anyhow!("failed to get receipt: {:?}", e)), - ); - *this.state = PendingFlashbotsTxState::GettingReceipt(fut); - ctx.waker().wake_by_ref(); - } - PendingFlashbotsTxState::GettingReceipt(fut) => { - if let Some(receipt) = futures_util::ready!(fut.as_mut().poll(ctx))? 
{ - *this.state = PendingFlashbotsTxState::Completed; - return Poll::Ready(Ok(Some(receipt))); - } else { - *this.state = PendingFlashbotsTxState::PausedGettingReceipt; - ctx.waker().wake_by_ref(); - } - } - PendingFlashbotsTxState::Completed => { - panic!("polled pending flashbots transaction future after completion") - } - } - - Poll::Pending - } -} - fn deserialize_u64<'de, D>(deserializer: D) -> std::result::Result where D: de::Deserializer<'de>, diff --git a/crates/builder/src/sender/mod.rs b/crates/builder/src/sender/mod.rs index fcc0ed70d..9d5445cec 100644 --- a/crates/builder/src/sender/mod.rs +++ b/crates/builder/src/sender/mod.rs @@ -14,7 +14,7 @@ mod bloxroute; mod flashbots; mod raw; -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use anyhow::Context; use async_trait::async_trait; @@ -24,8 +24,8 @@ use ethers::{ prelude::SignerMiddleware, providers::{JsonRpcClient, Middleware, Provider, ProviderError}, types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, Eip1559TransactionRequest, - TransactionReceipt, H256, U256, + transaction::eip2718::TypedTransaction, Address, Bytes, Eip1559TransactionRequest, H256, + U256, }, }; use ethers_signers::{LocalWallet, Signer}; @@ -99,8 +99,6 @@ pub(crate) trait TransactionSender: Send + Sync + 'static { async fn get_transaction_status(&self, tx_hash: H256) -> Result; - async fn wait_until_mined(&self, tx_hash: H256) -> Result>; - fn address(&self) -> Address; } @@ -178,7 +176,6 @@ impl TransactionSenderArgs { rpc_provider: Arc>, submit_provider: Option>>, signer: S, - eth_poll_interval: Duration, ) -> std::result::Result, SenderConstructorErrors> { let sender = match self { @@ -213,14 +210,9 @@ impl TransactionSenderArgs { args.status_url, )?) } - Self::Bloxroute(args) => { - TransactionSenderEnum::PolygonBloxroute(PolygonBloxrouteTransactionSender::new( - rpc_provider, - signer, - eth_poll_interval, - &args.header, - )?) - } + Self::Bloxroute(args) => TransactionSenderEnum::PolygonBloxroute( + PolygonBloxrouteTransactionSender::new(rpc_provider, signer, &args.header)?, + ), }; Ok(sender) } diff --git a/crates/builder/src/sender/raw.rs b/crates/builder/src/sender/raw.rs index 3e23f454f..429b4f570 100644 --- a/crates/builder/src/sender/raw.rs +++ b/crates/builder/src/sender/raw.rs @@ -17,8 +17,8 @@ use anyhow::Context; use async_trait::async_trait; use ethers::{ middleware::SignerMiddleware, - providers::{JsonRpcClient, Middleware, PendingTransaction, Provider}, - types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256, U256}, + providers::{JsonRpcClient, Middleware, Provider}, + types::{transaction::eip2718::TypedTransaction, Address, H256, U256}, }; use ethers_signers::Signer; use rundler_sim::ExpectedStorage; @@ -122,12 +122,6 @@ where }) } - async fn wait_until_mined(&self, tx_hash: H256) -> Result> { - Ok(PendingTransaction::new(tx_hash, self.provider.inner()) - .await - .context("should wait for transaction to be mined or dropped")?) 
- } - fn address(&self) -> Address { self.submitter.address() } diff --git a/crates/builder/src/task.rs b/crates/builder/src/task.rs index 6d4ffe5eb..c0ec6d939 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -442,7 +442,6 @@ where Arc::clone(&provider), submit_provider, signer, - self.args.eth_poll_interval, )?; let tracker_settings = transaction_tracker::Settings { diff --git a/crates/provider/src/ethers/metrics_middleware.rs b/crates/provider/src/ethers/metrics_middleware.rs index cea8bde1f..00c3eefd4 100644 --- a/crates/provider/src/ethers/metrics_middleware.rs +++ b/crates/provider/src/ethers/metrics_middleware.rs @@ -91,12 +91,12 @@ where }; let rpc = match rpc_code { - x if x == -32000 => RpcCode::ExecutionFailed, - x if x == -32700 => RpcCode::ParseError, - x if x == -32600 => RpcCode::InvalidRequest, - x if x == -32601 => RpcCode::MethodNotFound, - x if x == -32602 => RpcCode::InvalidParams, - x if x == -32603 => RpcCode::InternalError, + -32700 => RpcCode::ParseError, + -32000 => RpcCode::ExecutionFailed, + -32600 => RpcCode::InvalidRequest, + -32601 => RpcCode::MethodNotFound, + -32602 => RpcCode::InvalidParams, + -32603 => RpcCode::InternalError, x if (-32099..=-32000).contains(&x) => RpcCode::ServerError, x if x >= 0 => RpcCode::Success, _ => RpcCode::Other,
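            // Example classifications under the reordered mapping above (illustrative):
            //   -32601 -> RpcCode::MethodNotFound
            //   -32000 -> RpcCode::ExecutionFailed (matched before the -32099..=-32000 server-error range)
            //   -32050 -> RpcCode::ServerError
            //        0 -> RpcCode::Success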