feat: add support for avax chains #750

Merged 1 commit on Jul 8, 2024
7 changes: 7 additions & 0 deletions bin/rundler/chain_specs/avax.toml
@@ -0,0 +1,7 @@
name = "Avax"
id = 43114

# Intrinsic cost + overhead of non-reentry storage without refund
transaction_intrinsic_gas = "0x5DC0" # 24_000
# Extra cost of a deploy without refunds
per_user_op_deploy_overhead_gas = "0x4E20" # 20_000
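
For reference, a tiny standalone Rust check (not part of the PR) spelling the hex constants above out in decimal; the 21_000 + 3_000 split is an assumption read off the comment, not something the spec file states:

fn main() {
    let transaction_intrinsic_gas: u64 = 0x5DC0;
    let per_user_op_deploy_overhead_gas: u64 = 0x4E20;
    assert_eq!(transaction_intrinsic_gas, 24_000);
    assert_eq!(per_user_op_deploy_overhead_gas, 20_000);

    // Assumed breakdown: the standard 21_000 EVM transaction intrinsic cost
    // plus a 3_000 gas cushion for the non-refunded non-reentry storage write.
    assert_eq!(21_000 + 3_000, transaction_intrinsic_gas);
}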
4 changes: 4 additions & 0 deletions bin/rundler/chain_specs/avax_fuji.toml
@@ -0,0 +1,4 @@
base = "avax"

name = "Avax-Fuji"
id = 43113
7 changes: 3 additions & 4 deletions bin/rundler/src/cli/builder.rs
@@ -11,7 +11,7 @@
// You should have received a copy of the GNU General Public License along with Rundler.
// If not, see https://www.gnu.org/licenses/.

use std::{net::SocketAddr, time::Duration};
use std::net::SocketAddr;

use anyhow::{bail, Context};
use clap::Args;
@@ -354,7 +354,6 @@ impl BuilderArgs {
bundle_priority_fee_overhead_percent: common.bundle_priority_fee_overhead_percent,
priority_fee_mode,
sender_args,
eth_poll_interval: Duration::from_millis(common.eth_poll_interval_millis),
sim_settings: common.try_into()?,
max_blocks_to_wait_for_mine: self.max_blocks_to_wait_for_mine,
replacement_fee_percent_increase: self.replacement_fee_percent_increase,
@@ -442,7 +441,7 @@ pub async fn run(

let task_args = builder_args
.to_args(
chain_spec,
chain_spec.clone(),
&common_args,
Some(format_socket_addr(&builder_args.host, builder_args.port).parse()?),
)
@@ -451,7 +450,7 @@
let pool = connect_with_retries_shutdown(
"op pool from builder",
&pool_url,
RemotePoolClient::connect,
|url| RemotePoolClient::connect(url, chain_spec.clone()),
tokio::signal::ctrl_c(),
)
.await?;
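
`RemotePoolClient::connect` now takes a `ChainSpec`, so the bare function reference passed to `connect_with_retries_shutdown` becomes a closure that captures `chain_spec` (hence the `chain_spec.clone()` when building the task args). A generic, self-contained sketch of that adapter pattern, using made-up names rather than Rundler's actual helper signature:

use std::future::Future;

// A retry helper that only knows how to hand a URL to some connect function...
async fn connect_with_retries<F, Fut, C, E>(url: &str, connect: F) -> Result<C, E>
where
    F: Fn(String) -> Fut,
    Fut: Future<Output = Result<C, E>>,
{
    let mut last_err = None;
    for _attempt in 0..3 {
        match connect(url.to_string()).await {
            Ok(client) => return Ok(client),
            Err(err) => last_err = Some(err),
        }
    }
    Err(last_err.expect("at least one attempt was made"))
}

// ...can still build a client that needs extra state, because the caller
// closes over that state:
//
//     connect_with_retries(&pool_url, |url| Client::connect(url, chain_spec.clone())).await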
4 changes: 3 additions & 1 deletion bin/rundler/src/cli/chain_spec.rs
@@ -120,5 +120,7 @@ define_hardcoded_chain_specs!(
arbitrum,
arbitrum_sepolia,
polygon,
polygon_amoy
polygon_amoy,
avax,
avax_fuji
);
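
The two new entries register the TOML files above as hardcoded specs selectable by name. As a rough illustration only (the real `define_hardcoded_chain_specs!` expansion is not shown in this diff), the mapping the new entries add boils down to:

fn hardcoded_chain_id(name: &str) -> Option<u64> {
    match name {
        "avax" => Some(43114),      // bin/rundler/chain_specs/avax.toml
        "avax_fuji" => Some(43113), // bin/rundler/chain_specs/avax_fuji.toml
        _ => None,
    }
}

fn main() {
    assert_eq!(hardcoded_chain_id("avax_fuji"), Some(43113));
}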
11 changes: 0 additions & 11 deletions bin/rundler/src/cli/mod.rs
@@ -248,17 +248,6 @@ pub struct CommonArgs {
)]
pre_verification_gas_accept_percent: u64,

/// Interval at which the builder polls an Eth node for new blocks and
/// mined transactions.
#[arg(
long = "eth_poll_interval_millis",
name = "eth_poll_interval_millis",
env = "ETH_POLL_INTERVAL_MILLIS",
default_value = "100",
global = true
)]
pub eth_poll_interval_millis: u64,

#[arg(
long = "aws_region",
name = "aws_region",
24 changes: 23 additions & 1 deletion bin/rundler/src/cli/pool.rs
@@ -89,6 +89,27 @@ pub struct PoolArgs {
)]
pub allowlist_path: Option<String>,

/// Interval at which the pool polls an Eth node for new blocks
#[arg(
long = "pool.chain_poll_interval_millis",
name = "pool.chain_poll_interval_millis",
env = "POOL_CHAIN_POLL_INTERVAL_MILLIS",
default_value = "100",
global = true
)]
pub chain_poll_interval_millis: u64,

/// The number of times to retry syncing the chain before giving up and
/// waiting for the next block.
#[arg(
long = "pool.chain_sync_max_retries",
name = "pool.chain_sync_max_retries",
env = "POOL_CHAIN_SYNC_MAX_RETRIES",
default_value = "5",
global = true
)]
pub chain_sync_max_retries: u64,

#[arg(
long = "pool.chain_history_size",
name = "pool.chain_history_size",
@@ -234,7 +255,8 @@ impl PoolArgs {
.node_http
.clone()
.context("pool requires node_http arg")?,
http_poll_interval: Duration::from_millis(common.eth_poll_interval_millis),
chain_poll_interval: Duration::from_millis(self.chain_poll_interval_millis),
chain_max_sync_retries: self.chain_sync_max_retries,
pool_configs,
remote_address,
chain_update_channel_capacity: self.chain_update_channel_capacity.unwrap_or(1024),
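
Both new options follow the existing `pool.*` flag naming and can also be set through environment variables. A standalone clap sketch that mirrors just these two flags (illustrative only, not the real `PoolArgs`; it assumes clap v4 with the `derive` and `env` features enabled):

use clap::Parser;

#[derive(Parser, Debug)]
struct DemoPoolArgs {
    /// Interval at which the pool polls an Eth node for new blocks
    #[arg(
        long = "pool.chain_poll_interval_millis",
        env = "POOL_CHAIN_POLL_INTERVAL_MILLIS",
        default_value = "100"
    )]
    chain_poll_interval_millis: u64,

    /// Number of times to retry a chain sync before waiting for the next block
    #[arg(
        long = "pool.chain_sync_max_retries",
        env = "POOL_CHAIN_SYNC_MAX_RETRIES",
        default_value = "5"
    )]
    chain_sync_max_retries: u64,
}

fn main() {
    // e.g. `demo --pool.chain_poll_interval_millis 250 --pool.chain_sync_max_retries 10`
    let args = DemoPoolArgs::parse();
    println!("{args:?}");
}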
4 changes: 2 additions & 2 deletions bin/rundler/src/cli/rpc.rs
@@ -154,7 +154,7 @@ pub async fn run(
} = rpc_args;

let task_args = rpc_args.to_args(
chain_spec,
chain_spec.clone(),
&common_args,
(&common_args).try_into()?,
(&common_args).into(),
@@ -165,7 +165,7 @@
let pool = connect_with_retries_shutdown(
"op pool from rpc",
&pool_url,
RemotePoolClient::connect,
|url| RemotePoolClient::connect(url, chain_spec.clone()),
tokio::signal::ctrl_c(),
)
.await?;
14 changes: 7 additions & 7 deletions crates/builder/src/bundle_proposer.rs
@@ -471,8 +471,7 @@ where
let mut context = ProposalContext::<UO>::new();
let mut paymasters_to_reject = Vec::<EntityInfo>::new();

let ov = UO::gas_overheads();
let mut gas_spent = ov.transaction_gas_overhead;
let mut gas_spent = self.settings.chain_spec.transaction_intrinsic_gas;
let mut constructed_bundle_size = BUNDLE_BYTE_OVERHEAD;
for (po, simulation) in ops_with_simulations {
let op = po.clone().uo;
@@ -1243,7 +1242,7 @@ impl<UO: UserOperation> ProposalContext<UO> {
self.iter_ops_with_simulations()
.map(|sim_op| gas::user_operation_gas_limit(chain_spec, &sim_op.op, false))
.fold(U256::zero(), |acc, i| acc + i)
+ UO::gas_overheads().transaction_gas_overhead
+ chain_spec.transaction_intrinsic_gas
}

fn iter_ops_with_simulations(&self) -> impl Iterator<Item = &OpWithSimulation<UO>> + '_ {
@@ -1423,7 +1422,7 @@ mod tests {
use rundler_sim::MockSimulator;
use rundler_types::{
pool::{MockPool, SimulationViolation},
v0_6::UserOperation,
v0_6::{UserOperation, ENTRY_POINT_INNER_GAS_OVERHEAD},
UserOperation as UserOperationTrait, ValidTimeRange,
};

@@ -1444,13 +1443,14 @@
}])
.await;

let ov = UserOperation::gas_overheads();
let cs = ChainSpec::default();

let expected_gas = math::increase_by_percent(
op.pre_verification_gas
+ op.verification_gas_limit * 2
+ op.call_gas_limit
+ ov.bundle_transaction_gas_buffer
+ ov.transaction_gas_overhead,
+ cs.transaction_intrinsic_gas
+ ENTRY_POINT_INNER_GAS_OVERHEAD,
BUNDLE_TRANSACTION_GAS_OVERHEAD_PERCENT,
);

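
With this change the transaction-level overhead comes from the chain spec instead of a per-UO-version constant: the bundle gas limit is the sum of each op's gas limit plus `transaction_intrinsic_gas`, as the `ProposalContext` hunk above shows. A toy sketch of that shape (plain `u64`s standing in for `U256` and for `gas::user_operation_gas_limit`):

fn bundle_gas_limit(per_op_gas_limits: &[u64], transaction_intrinsic_gas: u64) -> u64 {
    per_op_gas_limits.iter().sum::<u64>() + transaction_intrinsic_gas
}

fn main() {
    // e.g. two ops on Avax, whose spec sets transaction_intrinsic_gas = 24_000
    assert_eq!(bundle_gas_limit(&[150_000, 220_000], 24_000), 394_000);
}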
5 changes: 1 addition & 4 deletions crates/builder/src/task.rs
@@ -83,8 +83,6 @@ pub struct Args {
pub priority_fee_mode: PriorityFeeMode,
/// Sender to be used by the builder
pub sender_args: TransactionSenderArgs,
/// RPC node poll interval
pub eth_poll_interval: Duration,
/// Operation simulation settings
pub sim_settings: SimulationSettings,
/// Maximum number of blocks to wait for a transaction to be mined
@@ -131,8 +129,7 @@
P: Pool + Clone,
{
async fn run(mut self: Box<Self>, shutdown_token: CancellationToken) -> anyhow::Result<()> {
let provider =
rundler_provider::new_provider(&self.args.rpc_url, Some(self.args.eth_poll_interval))?;
let provider = rundler_provider::new_provider(&self.args.rpc_url, None)?;
let submit_provider = if let TransactionSenderArgs::Raw(args) = &self.args.sender_args {
Some(rundler_provider::new_provider(&args.submit_url, None)?)
} else {
38 changes: 32 additions & 6 deletions crates/pool/src/chain.rs
@@ -34,9 +34,10 @@ use tokio::{
select,
sync::{broadcast, Semaphore},
task::JoinHandle,
time,
};
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};
use tracing::{debug, info, warn};

const MAX_LOAD_OPS_CONCURRENCY: usize = 64;

@@ -110,6 +111,7 @@ pub(crate) struct Settings {
pub(crate) history_size: u64,
pub(crate) poll_interval: Duration,
pub(crate) entry_point_addresses: HashMap<Address, EntryPointVersion>,
pub(crate) max_sync_retries: u64,
}

#[derive(Debug)]
@@ -201,13 +203,28 @@ impl<P: Provider> Chain<P> {
)
.await;
block_hash = hash;
let update = self.sync_to_block(block).await;
match update {
Ok(update) => return update,
Err(error) => {
error!("Failed to update chain at block {block_hash:?}. Will try again at next block. {error:?}");

for i in 0..=self.settings.max_sync_retries {
if i > 0 {
ChainMetrics::increment_sync_retries();
}

let update = self.sync_to_block(block.clone()).await;
match update {
Ok(update) => return update,
Err(error) => {
debug!("Failed to update chain at block {block_hash:?}: {error:?}");
}
}

time::sleep(self.settings.poll_interval).await;
}

warn!(
"Failed to update chain at block {:?} after {} retries. Abandoning sync.",
block_hash, self.settings.max_sync_retries
);
ChainMetrics::increment_sync_abandoned();
}
}

@@ -665,6 +682,14 @@ impl ChainMetrics {
fn increment_total_reorg_depth(depth: u64) {
metrics::counter!("op_pool_chain_total_reorg_depth").increment(depth);
}

fn increment_sync_retries() {
metrics::counter!("op_pool_chain_sync_retries").increment(1);
}

fn increment_sync_abandoned() {
metrics::counter!("op_pool_chain_sync_abandoned").increment(1);
}
}

#[cfg(test)]
@@ -1366,6 +1391,7 @@ mod tests {
(ENTRY_POINT_ADDRESS_V0_6, EntryPointVersion::V0_6),
(ENTRY_POINT_ADDRESS_V0_7, EntryPointVersion::V0_7),
]),
max_sync_retries: 1,
},
);
(chain, controller)
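
The sync loop now retries a failed block sync up to `max_sync_retries` times, sleeping for `poll_interval` between attempts, bumping a retry metric on every attempt after the first, and abandoning the block (with its own metric) if every attempt fails. A generic, standalone sketch of that pattern, not the actual `Chain<P>` method (assumes tokio):

use std::{fmt::Debug, future::Future, time::Duration};

async fn sync_with_retries<F, Fut, T, E: Debug>(
    max_retries: u64,
    poll_interval: Duration,
    mut attempt: F,
) -> Option<T>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<T, E>>,
{
    for i in 0..=max_retries {
        // the real code also increments a sync-retries counter when i > 0
        match attempt().await {
            Ok(update) => return Some(update),
            Err(err) => eprintln!("sync attempt {i} failed: {err:?}"),
        }
        tokio::time::sleep(poll_interval).await;
    }
    // every attempt failed: give up on this block and wait for the next one
    None
}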
28 changes: 21 additions & 7 deletions crates/pool/src/server/remote/client.rs
@@ -13,13 +13,15 @@

use std::{pin::Pin, str::FromStr};

use anyhow::Context;
use ethers::types::{Address, H256};
use futures_util::Stream;
use rundler_task::{
grpc::protos::{from_bytes, ConversionError, ToProtoBytes},
server::{HealthCheck, ServerStatus},
};
use rundler_types::{
chain::ChainSpec,
pool::{
NewHead, PaymasterMetadata, Pool, PoolError, PoolOperation, PoolResult, Reputation,
ReputationStatus, StakeStatus,
@@ -48,25 +50,27 @@ use super::protos::{
DebugDumpMempoolRequest, DebugDumpPaymasterBalancesRequest, DebugDumpReputationRequest,
DebugSetReputationRequest, GetOpsRequest, GetReputationStatusRequest, GetStakeStatusRequest,
RemoveOpsRequest, ReputationStatus as ProtoReputationStatus, SubscribeNewHeadsRequest,
SubscribeNewHeadsResponse, UpdateEntitiesRequest,
SubscribeNewHeadsResponse, TryUoFromProto, UpdateEntitiesRequest,
};

/// Remote pool client
///
/// Used to submit requests to a remote pool server.
#[derive(Debug, Clone)]
pub struct RemotePoolClient {
chain_spec: ChainSpec,
op_pool_client: OpPoolClient<Channel>,
op_pool_health: HealthClient<Channel>,
}

impl RemotePoolClient {
/// Connect to a remote pool server, returning a client for submitting requests.
pub async fn connect(url: String) -> anyhow::Result<Self> {
pub async fn connect(url: String, chain_spec: ChainSpec) -> anyhow::Result<Self> {
let op_pool_client = OpPoolClient::connect(url.clone()).await?;
let op_pool_health =
HealthClient::new(Channel::builder(Uri::from_str(&url)?).connect().await?);
Ok(Self {
chain_spec,
op_pool_client,
op_pool_health,
})
@@ -186,7 +190,10 @@ impl Pool for RemotePoolClient {
Some(get_ops_response::Result::Success(s)) => s
.ops
.into_iter()
.map(PoolOperation::try_from)
.map(|proto_uo| {
PoolOperation::try_uo_from_proto(proto_uo, &self.chain_spec)
.context("should convert proto uo to pool operation")
})
.map(|res| res.map_err(PoolError::from))
.collect(),
Some(get_ops_response::Result::Failure(f)) => Err(f.try_into()?),
@@ -209,9 +216,13 @@
.result;

match res {
Some(get_op_by_hash_response::Result::Success(s)) => {
Ok(s.op.map(PoolOperation::try_from).transpose()?)
}
Some(get_op_by_hash_response::Result::Success(s)) => Ok(s
.op
.map(|proto_uo| {
PoolOperation::try_uo_from_proto(proto_uo, &self.chain_spec)
.context("should convert proto uo to pool operation")
})
.transpose()?),
Some(get_op_by_hash_response::Result::Failure(e)) => match e.error {
Some(_) => Err(e.try_into()?),
None => Err(PoolError::Other(anyhow::anyhow!(
@@ -380,7 +391,10 @@
Some(debug_dump_mempool_response::Result::Success(s)) => s
.ops
.into_iter()
.map(PoolOperation::try_from)
.map(|proto_uo| {
PoolOperation::try_uo_from_proto(proto_uo, &self.chain_spec)
.context("should convert proto uo to pool operation")
})
.map(|res| res.map_err(PoolError::from))
.collect(),
Some(debug_dump_mempool_response::Result::Failure(f)) => Err(f.try_into()?),
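
Decoding a protobuf `PoolOperation` now needs the chain's configuration, so the plain `TryFrom` conversions are replaced by `TryUoFromProto`, which threads the client's stored `ChainSpec` through every conversion (and is why `RemotePoolClient` now holds a `chain_spec` field). A minimal illustration of the pattern with hypothetical stand-in types, not Rundler's real definitions:

struct ChainSpec { id: u64 }
struct ProtoOp { sender: Vec<u8> }
struct PoolOperation { sender: Vec<u8>, chain_id: u64 }

// A conversion that needs extra context, which plain `TryFrom` cannot carry.
trait TryUoFromProto<P>: Sized {
    fn try_uo_from_proto(proto: P, spec: &ChainSpec) -> Result<Self, String>;
}

impl TryUoFromProto<ProtoOp> for PoolOperation {
    fn try_uo_from_proto(proto: ProtoOp, spec: &ChainSpec) -> Result<Self, String> {
        Ok(PoolOperation { sender: proto.sender, chain_id: spec.id })
    }
}

fn convert_all(ops: Vec<ProtoOp>, spec: &ChainSpec) -> Result<Vec<PoolOperation>, String> {
    ops.into_iter()
        .map(|proto| PoolOperation::try_uo_from_proto(proto, spec))
        .collect()
}

fn main() {
    let spec = ChainSpec { id: 43114 };
    let ops = vec![ProtoOp { sender: vec![0xAB] }];
    assert_eq!(convert_all(ops, &spec).unwrap().len(), 1);
}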