diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 74b5cfbaa..cb781df3c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -39,6 +39,15 @@ jobs: with: version: nightly + # TODO : For now madara binary is stored in aws s3 bucket : + # After the proper release binaries are implemented + # We can directly use that and we can remove this + # temporary AWS implementation + - name: Download madara binary for l2 client testing + run: | + curl -L https://madara-test-binary.s3.us-west-1.amazonaws.com/madara-linux -o ./test-artifacts/madara + chmod +x ./test-artifacts/madara + - name: Build and run tests env: ETH_FORK_URL: ${{ secrets.ETH_FORK_URL }} diff --git a/.github/workflows/rust-test.yml b/.github/workflows/rust-test.yml index 6d13fd4ed..62a5dfe44 100644 --- a/.github/workflows/rust-test.yml +++ b/.github/workflows/rust-test.yml @@ -20,6 +20,24 @@ jobs: - uses: foundry-rs/foundry-toolchain@v1 with: version: nightly + - name: Launch Anvil + run: anvil --fork-url $ANVIL_FORK_URL --fork-block-number $ANVIL_BLOCK_NUMBER & + env: + ANVIL_FORK_URL: "https://eth.merkle.io" + ANVIL_BLOCK_NUMBER: 20395662 + - name: Wait for Anvil to be ready + run: | + while ! 
nc -z localhost 8545; do + sleep 1 + done + # TODO : For now madara binary is stored in aws s3 bucket : + # After the proper release binaries are implemented + # We can directly use that and we can remove this + # temporary AWS implementation + - name: Download madara binary for l2 client testing + run: | + curl -L https://madara-test-binary.s3.us-west-1.amazonaws.com/madara-linux -o ./test-artifacts/madara + chmod +x ./test-artifacts/madara - name: Run unit tests run: | diff --git a/.gitignore b/.gitignore index 162dbe853..b59f4e777 100644 --- a/.gitignore +++ b/.gitignore @@ -65,3 +65,6 @@ tmp/ # Running madara with make and docker compose .secrets image.tar.gz + +# madara test artifacts for testing l2 clients for appchains +test-artifacts/madara \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cdf30ddc..155b44a0f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## Next release - fix(primitives): limit legacy class sizes +- feat : l3 support - fix(block_production): dynamic block closing now adds special address with prev block hash - fix(rpc): call, simulate, estimate rpcs executed on top of the block, not at the start of it - fix(compilation): crate-level compilation diff --git a/Cargo.lock b/Cargo.lock index 50ce03c38..f7de6e626 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4664,6 +4664,19 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper 0.14.31", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "hyper-tls" version = "0.6.0" @@ -5383,11 +5396,11 @@ dependencies = [ "mc-block-production", "mc-db", "mc-devnet", - "mc-eth", "mc-gateway-client", "mc-gateway-server", "mc-mempool", "mc-rpc", + "mc-settlement-client", "mc-sync", "mc-telemetry", "mp-block", @@ -5406,6 +5419,8 @@ 
dependencies = [ "serde", "serde_json", "serde_yaml", + "starknet-core", + "starknet-providers", "starknet_api", "thiserror 2.0.3", "tokio", @@ -5649,50 +5664,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "mc-eth" -version = "0.7.0" -dependencies = [ - "alloy", - "anyhow", - "bigdecimal", - "bitvec", - "dotenv", - "futures", - "httpmock", - "lazy_static", - "mc-analytics", - "mc-db", - "mc-mempool", - "mp-chain-config", - "mp-convert", - "mp-transactions", - "mp-utils", - "once_cell", - "opentelemetry", - "opentelemetry-appender-tracing", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "opentelemetry-stdout", - "opentelemetry_sdk", - "regex", - "rstest 0.18.2", - "serde", - "serde_json", - "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", - "starknet_api", - "tempfile", - "thiserror 2.0.3", - "time", - "tokio", - "tracing", - "tracing-core", - "tracing-opentelemetry", - "tracing-subscriber", - "tracing-test", - "url", -] - [[package]] name = "mc-exec" version = "0.7.0" @@ -5735,7 +5706,7 @@ dependencies = [ "http 1.1.0", "http-body-util", "hyper 1.5.0", - "hyper-tls", + "hyper-tls 0.6.0", "hyper-util", "mp-block", "mp-class", @@ -5861,6 +5832,65 @@ dependencies = [ "tracing", ] +[[package]] +name = "mc-settlement-client" +version = "0.7.0" +dependencies = [ + "alloy", + "anyhow", + "assert_matches", + "async-trait", + "bigdecimal", + "bitvec", + "dotenv", + "futures", + "hex", + "httpmock", + "lazy_static", + "log", + "m-cairo-test-contracts", + "matchers", + "mc-analytics", + "mc-db", + "mc-mempool", + "mockall", + "mp-chain-config", + "mp-convert", + "mp-transactions", + "mp-utils", + "once_cell", + "opentelemetry", + "opentelemetry-appender-tracing", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "opentelemetry-stdout", + "opentelemetry_sdk", + "regex", + "reqwest 0.11.27", + "rstest 0.18.2", + "serde", + "serde_json", + "serial_test", + 
"starknet-accounts", + "starknet-contract", + "starknet-core", + "starknet-crypto 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "starknet-providers", + "starknet-signers", + "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", + "starknet_api", + "tempfile", + "thiserror 2.0.3", + "time", + "tokio", + "tracing", + "tracing-core", + "tracing-opentelemetry", + "tracing-subscriber", + "tracing-test", + "url", +] + [[package]] name = "mc-sync" version = "0.7.0" @@ -5971,9 +6001,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", @@ -5985,9 +6015,9 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", @@ -7230,10 +7260,12 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.31", "hyper-rustls 0.24.2", + "hyper-tls 0.5.0", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -7245,6 +7277,7 @@ dependencies = [ "sync_wrapper 0.1.2", "system-configuration 0.5.1", "tokio", + "tokio-native-tls", "tokio-rustls 0.24.1", "tower-service", "url", @@ -7273,7 +7306,7 @@ dependencies = [ "http-body-util", "hyper 1.5.0", "hyper-rustls 0.27.3", - "hyper-tls", + "hyper-tls 0.6.0", "hyper-util", "ipnet", "js-sys", @@ -7725,6 +7758,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scc" +version = "2.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "28e1c91382686d21b5ac7959341fcb9780fa7c03773646995a87c950fa7be640" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.26" @@ -7787,6 +7829,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sdd" +version = "3.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478f121bb72bbf63c52c93011ea1791dca40140dfe13f8336c4c5ac952c33aa9" + [[package]] name = "sec1" version = "0.7.3" @@ -7985,6 +8033,31 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot 0.12.3", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "sha-1" version = "0.9.8" diff --git a/Cargo.toml b/Cargo.toml index 9a8c743ff..fd2411b39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,9 @@ [workspace] members = [ - # madara "crates/madara/client/db", "crates/madara/client/exec", "crates/madara/client/sync", - "crates/madara/client/eth", + "crates/madara/client/settlement_client", "crates/madara/client/rpc", "crates/madara/client/gateway/client", "crates/madara/client/gateway/server", @@ -31,11 +30,10 @@ members = [ resolver = "2" # Everything except test-related packages, so that they are not compiled when doing `cargo build`. 
default-members = [ - # madara "crates/madara/client/db", "crates/madara/client/exec", "crates/madara/client/sync", - "crates/madara/client/eth", + "crates/madara/client/settlement_client", "crates/madara/client/gateway/client", "crates/madara/client/gateway/server", "crates/madara/client/rpc", @@ -129,7 +127,7 @@ mc-rpc = { path = "crates/madara/client/rpc" } mc-gateway-client = { path = "crates/madara/client/gateway/client" } mc-gateway-server = { path = "crates/madara/client/gateway/server" } mc-sync = { path = "crates/madara/client/sync" } -mc-eth = { path = "crates/madara/client/eth" } +mc-settlement-client = { path = "crates/madara/client/settlement_client" } mc-mempool = { path = "crates/madara/client/mempool" } mc-block-production = { path = "crates/madara/client/block_production" } mc-block-import = { path = "crates/madara/client/block_import" } @@ -247,6 +245,7 @@ proptest-state-machine = "0.3.1" tempfile = "3.10.1" httpmock = "0.7.0" mockall = "0.13.0" +serial_test = "3.1.1" fdlimit = "0.3.0" assert_matches = "1.5" diff --git a/cairo/src/appchain_test.cairo b/cairo/src/appchain_test.cairo new file mode 100644 index 000000000..78ea72bfa --- /dev/null +++ b/cairo/src/appchain_test.cairo @@ -0,0 +1,53 @@ +#[starknet::contract] +mod StateUpdateContract { + type StateRoot = felt252; + type BlockNumber = felt252; + type BlockHash = felt252; + + #[storage] + struct Storage { + state_root: StateRoot, + block_number: BlockNumber, + block_hash: BlockHash, + } + + #[event] + #[derive(Drop, starknet::Event)] + enum Event { + LogStateUpdate: LogStateUpdate + } + + #[derive(Drop, starknet::Event)] + struct LogStateUpdate { + state_root: felt252, + block_number: felt252, + block_hash: felt252, + } + + #[constructor] + fn constructor(ref self: ContractState) { + // Initialize with default values + self.state_root.write(0); + self.block_number.write(0); + self.block_hash.write(0); + } + + #[external(v0)] + fn update_state( + ref self: ContractState, + block_number: 
BlockNumber, + state_root: StateRoot, + block_hash: BlockHash + ) { + self.state_root.write(state_root); + self.block_number.write(block_number); + self.block_hash.write(block_hash); + + self.emit(LogStateUpdate { state_root, block_number, block_hash, }); + } + + #[external(v0)] + fn get_state(self: @ContractState) -> (StateRoot, BlockNumber, BlockHash) { + (self.state_root.read(), self.block_number.read(), self.block_hash.read()) + } +} diff --git a/cairo/src/lib.cairo b/cairo/src/lib.cairo index 67958ac67..86f2244a8 100644 --- a/cairo/src/lib.cairo +++ b/cairo/src/lib.cairo @@ -1,3 +1,5 @@ pub mod test_account; pub mod hello; +pub mod appchain_test; +pub mod messaging_test; \ No newline at end of file diff --git a/cairo/src/messaging_test.cairo b/cairo/src/messaging_test.cairo new file mode 100644 index 000000000..0a2ca28a6 --- /dev/null +++ b/cairo/src/messaging_test.cairo @@ -0,0 +1,152 @@ +use starknet::ContractAddress; +use core::integer::u256; + +#[derive(Drop, Serde)] +struct MessageData { + from_address: ContractAddress, + to_address: felt252, + selector: felt252, + payload: Array, + nonce: felt252, +} + +// Separate storage struct to handle the array storage properly +#[derive(Drop, Serde, starknet::Store)] +struct StorageMessageData { + from_address: ContractAddress, + to_address: felt252, + selector: felt252, + nonce: felt252, +} + +#[starknet::interface] +trait IMessagingContract { + fn get_message_data(self: @TContractState) -> MessageData; + fn fire_event(ref self: TContractState); + fn l1_to_l2_message_cancellations(self: @TContractState, msg_hash: felt252) -> u256; + fn set_is_canceled(ref self: TContractState, value: bool); + fn get_l1_to_l2_msg_hash(self: @TContractState) -> felt252; +} + +#[starknet::contract] +mod MessagingContract { + use super::IMessagingContract; + use super::MessageData; + use core::array::SpanTrait; + use core::option::OptionTrait; + use core::traits::Into; + use starknet::ContractAddress; + use core::array::ArrayTrait; + 
use core::poseidon::poseidon_hash_span; + use core::integer::u256; + + #[event] + #[derive(Drop, starknet::Event)] + enum Event { + MessageSent: MessageSent, + } + + #[derive(Drop, starknet::Event)] + struct MessageSent { + #[key] + message_hash: felt252, + #[key] + from_address: ContractAddress, + #[key] + to_address: felt252, + selector: felt252, + nonce: felt252, + payload: Array, + } + + #[storage] + struct Storage { + is_canceled: bool + } + + #[abi(embed_v0)] + impl MessagingContract of super::IMessagingContract { + fn get_message_data(self: @ContractState) -> MessageData { + let mut payload: Array = ArrayTrait::new(); + // Transfer L2 ---> L3 + // from_address : + // to_address : + // selector : transfer + // payload : [ , 30 ETH ] + payload + .append( + 1745450722268439108899567493174320056804647958314420522290024379112230030194 + .into() + ); + payload.append(30000000000000000000.into()); + payload.append(0.into()); + + MessageData { + from_address: 3293945099482077566294620753663887236810230524774221047563633702975851058323 + .try_into() + .unwrap(), + to_address: 2087021424722619777119509474943472645767659996348769578120564519014510906823 + .into(), + selector: 232670485425082704932579856502088130646006032362877466777181098476241604910 + .into(), + payload, + nonce: 10000000000000000.into(), + } + } + + fn fire_event(ref self: ContractState) { + let data = self.get_message_data(); + let hash = self.get_l1_to_l2_msg_hash(); + self + .emit( + Event::MessageSent( + MessageSent { + message_hash: hash, + from_address: data.from_address, + to_address: data.to_address, + selector: data.selector, + payload: data.payload, + nonce: data.nonce, + } + ) + ); + } + + fn l1_to_l2_message_cancellations(self: @ContractState, msg_hash: felt252) -> u256 { + if self.is_canceled.read() { + 1723134213.into() + } else { + 0.into() + } + } + + fn set_is_canceled(ref self: ContractState, value: bool) { + self.is_canceled.write(value); + } + + fn get_l1_to_l2_msg_hash(self: 
@ContractState) -> felt252 { + let data = self.get_message_data(); + let mut hash_data: Array = ArrayTrait::new(); + hash_data.append(data.from_address.into()); + hash_data.append(data.to_address); + hash_data.append(data.selector); + hash_data.append(data.nonce); + let len: felt252 = data.payload.len().into(); + hash_data.append(len); + + let mut i: usize = 0; + let payload_span = data.payload.span(); + loop { + if i >= data.payload.len() { + break; + } + let value = *payload_span.at(i); + let value_felt: felt252 = value.try_into().unwrap(); + hash_data.append(value_felt); + i += 1; + }; + + poseidon_hash_span(hash_data.span()) + } + } +} diff --git a/crates/madara/cairo-test-contracts/src/lib.rs b/crates/madara/cairo-test-contracts/src/lib.rs index c89e65522..cf068e711 100644 --- a/crates/madara/cairo-test-contracts/src/lib.rs +++ b/crates/madara/cairo-test-contracts/src/lib.rs @@ -3,3 +3,7 @@ pub const TEST_CONTRACT_SIERRA: &[u8] = include_bytes!("../../../../cairo/target/dev/madara_contracts_TestContract.contract_class.json"); +pub const APPCHAIN_CONTRACT_SIERRA: &[u8] = + include_bytes!("../../../../cairo/target/dev/madara_contracts_StateUpdateContract.contract_class.json"); +pub const MESSAGING_CONTRACT_SIERRA: &[u8] = + include_bytes!("../../../../cairo/target/dev/madara_contracts_MessagingContract.contract_class.json"); diff --git a/crates/madara/client/db/docs/flat_storage.md b/crates/madara/client/db/docs/flat_storage.md index fea89c68f..136d2dca6 100644 --- a/crates/madara/client/db/docs/flat_storage.md +++ b/crates/madara/client/db/docs/flat_storage.md @@ -6,14 +6,14 @@ We may want to remove that flat storage from bonsai-trie as it's not used and we Instead, we have implemented our own optimized lookup, which is implemented with a column that looks like this: -![schema flat_storage](./flat_storage.png) +![schema flat_storage](flat_storage.png) The trick here is to rely on the fact that rocksdb columns are sorted trees. 
The rocksdb `get` operation does not allow getting the value of (contract_1, key_1) at block 15 here, because the key has not been modified at that block. Instead, we use rocksdb iterators. -![schema flat_storage_iterator](./flat_storage_iterator.png) +![schema flat_storage_iterator](flat_storage_iterator.png) This allows us to get the most recent value for a (contract, key) at that block, by using a single rocksdb lookup. diff --git a/crates/madara/client/devnet/README.md b/crates/madara/client/devnet/README.md index 8c25f4101..e4ad0a37e 100644 --- a/crates/madara/client/devnet/README.md +++ b/crates/madara/client/devnet/README.md @@ -1,7 +1,7 @@ # Devnet predeployed contract addresses The devnet always has the predeployed contract addresses at these addresses, with these private keys. -They are OpenZeppelin contracts, see [cairo-artifacts](../../../cairo-artifacts/README.md). +They are OpenZeppelin contracts, see [cairo-artifacts](../../../../cairo-artifacts/README.md). They are also shown on node startup. 
```sh diff --git a/crates/madara/client/eth/src/client.rs b/crates/madara/client/eth/src/client.rs deleted file mode 100644 index 196afbf48..000000000 --- a/crates/madara/client/eth/src/client.rs +++ /dev/null @@ -1,333 +0,0 @@ -use crate::client::StarknetCoreContract::StarknetCoreContractInstance; -use crate::utils::u256_to_felt; -use alloy::sol_types::SolEvent; -use alloy::{ - primitives::Address, - providers::{Provider, ProviderBuilder, ReqwestProvider, RootProvider}, - rpc::types::Filter, - sol, - transports::http::{Client, Http}, -}; -use mc_analytics::register_gauge_metric_instrument; -use opentelemetry::{global, KeyValue}; -use opentelemetry::{global::Error, metrics::Gauge}; - -use anyhow::{bail, Context}; -use bitvec::macros::internal::funty::Fundamental; -use starknet_types_core::felt::Felt; -use std::sync::Arc; -use url::Url; - -#[derive(Clone, Debug)] -pub struct L1BlockMetrics { - // L1 network metrics - pub l1_block_number: Gauge, - // gas price is also define in sync/metrics/block_metrics.rs but this would be the price from l1 - pub l1_gas_price_wei: Gauge, - pub l1_gas_price_strk: Gauge, -} - -impl L1BlockMetrics { - pub fn register() -> Result { - let common_scope_attributes = vec![KeyValue::new("crate", "L1 Block")]; - let eth_meter = global::meter_with_version( - "crates.l1block.opentelemetry", - Some("0.17"), - Some("https://opentelemetry.io/schemas/1.2.0"), - Some(common_scope_attributes.clone()), - ); - - let l1_block_number = register_gauge_metric_instrument( - ð_meter, - "l1_block_number".to_string(), - "Gauge for madara L1 block number".to_string(), - "".to_string(), - ); - - let l1_gas_price_wei = register_gauge_metric_instrument( - ð_meter, - "l1_gas_price_wei".to_string(), - "Gauge for madara L1 gas price in wei".to_string(), - "".to_string(), - ); - - let l1_gas_price_strk = register_gauge_metric_instrument( - ð_meter, - "l1_gas_price_strk".to_string(), - "Gauge for madara L1 gas price in strk".to_string(), - "".to_string(), - ); - - 
Ok(Self { l1_block_number, l1_gas_price_wei, l1_gas_price_strk }) - } -} - -// abi taken from: https://etherscan.io/address/0x6e0acfdc3cf17a7f99ed34be56c3dfb93f464e24#code -// The official starknet core contract ^ -sol!( - #[sol(rpc)] - #[derive(Debug)] - StarknetCoreContract, - "src/abis/starknet_core.json" -); - -pub struct EthereumClient { - pub provider: Arc, - pub l1_core_contract: StarknetCoreContractInstance, RootProvider>>, - pub l1_block_metrics: L1BlockMetrics, -} - -impl Clone for EthereumClient { - fn clone(&self) -> Self { - EthereumClient { - provider: Arc::clone(&self.provider), - l1_core_contract: self.l1_core_contract.clone(), - l1_block_metrics: self.l1_block_metrics.clone(), - } - } -} - -impl EthereumClient { - /// Create a new EthereumClient instance with the given RPC URL - pub async fn new(url: Url, l1_core_address: Address, l1_block_metrics: L1BlockMetrics) -> anyhow::Result { - let provider = ProviderBuilder::new().on_http(url); - - EthereumClient::assert_core_contract_exists(&provider, l1_core_address).await?; - - let core_contract = StarknetCoreContract::new(l1_core_address, provider.clone()); - - Ok(Self { provider: Arc::new(provider), l1_core_contract: core_contract, l1_block_metrics }) - } - - /// Assert that L1 Core contract exists by checking its bytecode. - async fn assert_core_contract_exists( - provider: &RootProvider>, - l1_core_address: Address, - ) -> anyhow::Result<()> { - let l1_core_contract_bytecode = provider.get_code_at(l1_core_address).await?; - if l1_core_contract_bytecode.is_empty() { - bail!("The L1 Core Contract could not be found. Check that the L2 chain matches the L1 RPC endpoint."); - } - Ok(()) - } - - /// Retrieves the latest Ethereum block number - pub async fn get_latest_block_number(&self) -> anyhow::Result { - let block_number = self.provider.get_block_number().await?.as_u64(); - Ok(block_number) - } - - /// Get the block number of the last occurrence of a given event. 
- pub async fn get_last_event_block_number(&self) -> anyhow::Result { - let latest_block: u64 = self.get_latest_block_number().await?; - - // Assuming an avg Block time of 15sec we check for a LogStateUpdate occurence in the last ~24h - let filter = Filter::new() - .from_block(latest_block - 6000) - .to_block(latest_block) - .address(*self.l1_core_contract.address()); - - let logs = self.provider.get_logs(&filter).await?; - - let filtered_logs = logs.into_iter().filter_map(|log| log.log_decode::().ok()).collect::>(); - - if let Some(last_log) = filtered_logs.last() { - let last_block: u64 = last_log.block_number.context("no block number in log")?; - Ok(last_block) - } else { - bail!("no event found") - } - } - - /// Get the last Starknet block number verified on L1 - pub async fn get_last_verified_block_number(&self) -> anyhow::Result { - let block_number = self.l1_core_contract.stateBlockNumber().call().await?; - let last_block_number: u64 = (block_number._0).as_u64(); - Ok(last_block_number) - } - - /// Get the last Starknet state root verified on L1 - pub async fn get_last_state_root(&self) -> anyhow::Result { - let state_root = self.l1_core_contract.stateRoot().call().await?; - u256_to_felt(state_root._0) - } - - /// Get the last Starknet block hash verified on L1 - pub async fn get_last_verified_block_hash(&self) -> anyhow::Result { - let block_hash = self.l1_core_contract.stateBlockHash().call().await?; - u256_to_felt(block_hash._0) - } -} - -#[cfg(test)] -pub mod eth_client_getter_test { - use super::*; - use alloy::{ - node_bindings::{Anvil, AnvilInstance}, - primitives::U256, - }; - - use std::ops::{Deref, Range}; - use std::sync::Mutex; - use tokio; - - // https://etherscan.io/tx/0xcadb202495cd8adba0d9b382caff907abf755cd42633d23c4988f875f2995d81#eventlog - // The txn we are referring to it is here ^ - const L1_BLOCK_NUMBER: u64 = 20395662; - const CORE_CONTRACT_ADDRESS: &str = "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4"; - const L2_BLOCK_NUMBER: u64 = 
662703; - const L2_BLOCK_HASH: &str = "563216050958639290223177746678863910249919294431961492885921903486585884664"; - const L2_STATE_ROOT: &str = "1456190284387746219409791261254265303744585499659352223397867295223408682130"; - - lazy_static::lazy_static! { - static ref FORK_URL: String = std::env::var("ETH_FORK_URL").expect("ETH_FORK_URL not set"); - } - - const PORT_RANGE: Range = 19500..20000; - - struct AvailablePorts> { - to_reuse: Vec, - next: I, - } - - lazy_static::lazy_static! { - static ref AVAILABLE_PORTS: Mutex>> = Mutex::new(AvailablePorts { to_reuse: vec![], next: PORT_RANGE }); - } - pub struct AnvilPortNum(pub u16); - impl Drop for AnvilPortNum { - fn drop(&mut self) { - let mut guard = AVAILABLE_PORTS.lock().expect("poisoned lock"); - guard.to_reuse.push(self.0); - } - } - - pub fn get_port() -> AnvilPortNum { - let mut guard = AVAILABLE_PORTS.lock().expect("poisoned lock"); - if let Some(el) = guard.to_reuse.pop() { - return AnvilPortNum(el); - } - AnvilPortNum(guard.next.next().expect("no more port to use")) - } - - static ANVIL: Mutex>> = Mutex::new(None); - - /// Wrapper for an Anvil instance that automatically cleans up when all handles are dropped - pub struct AnvilHandle { - instance: Arc, - } - - impl Drop for AnvilHandle { - fn drop(&mut self) { - let mut guard = ANVIL.lock().expect("poisoned lock"); - // Check if this Arc is the last one (strong_count == 2 because of our reference - // and the one in the static) - if Arc::strong_count(&self.instance) == 2 { - println!("Cleaning up Anvil instance"); - *guard = None; - } - } - } - - impl Deref for AnvilHandle { - type Target = AnvilInstance; - - fn deref(&self) -> &Self::Target { - &self.instance - } - } - - pub fn get_shared_anvil() -> AnvilHandle { - let mut guard = ANVIL.lock().expect("poisoned lock"); - if guard.is_none() { - *guard = Some(Arc::new(create_anvil_instance())); - } - AnvilHandle { instance: Arc::clone(guard.as_ref().unwrap()) } - } - - pub fn create_anvil_instance() -> 
AnvilInstance { - let port = get_port(); - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(port.0) - .timeout(60_000) - .try_spawn() - .expect("failed to spawn anvil instance"); - println!("Anvil started and running at `{}`", anvil.endpoint()); - anvil - } - - pub fn create_ethereum_client(url: Option<&str>) -> EthereumClient { - let rpc_url: Url = url.unwrap_or("http://localhost:8545").parse().expect("issue while parsing URL"); - - let provider = ProviderBuilder::new().on_http(rpc_url.clone()); - let address = Address::parse_checksummed(CORE_CONTRACT_ADDRESS, None).unwrap(); - let contract = StarknetCoreContract::new(address, provider.clone()); - - let l1_block_metrics = L1BlockMetrics::register().unwrap(); - - EthereumClient { provider: Arc::new(provider), l1_core_contract: contract.clone(), l1_block_metrics } - } - - #[tokio::test] - async fn fail_create_new_client_invalid_core_contract() { - let anvil = get_shared_anvil(); - // Sepolia core contract instead of mainnet - const INVALID_CORE_CONTRACT_ADDRESS: &str = "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057"; - - let rpc_url: Url = anvil.endpoint_url(); - - let core_contract_address = Address::parse_checksummed(INVALID_CORE_CONTRACT_ADDRESS, None).unwrap(); - let l1_block_metrics = L1BlockMetrics::register().unwrap(); - - let new_client_result = EthereumClient::new(rpc_url, core_contract_address, l1_block_metrics).await; - assert!(new_client_result.is_err(), "EthereumClient::new should fail with an invalid core contract address"); - } - - #[tokio::test] - async fn get_latest_block_number_works() { - let anvil = get_shared_anvil(); - let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); - let block_number = - eth_client.provider.get_block_number().await.expect("issue while fetching the block number").as_u64(); - assert_eq!(block_number, L1_BLOCK_NUMBER, "provider unable to get the correct block number"); - } - - #[tokio::test] - async fn 
get_last_event_block_number_works() { - let anvil = get_shared_anvil(); - let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); - let block_number = eth_client - .get_last_event_block_number::() - .await - .expect("issue while getting the last block number with given event"); - assert_eq!(block_number, L1_BLOCK_NUMBER, "block number with given event not matching"); - } - - #[tokio::test] - async fn get_last_verified_block_hash_works() { - let anvil = get_shared_anvil(); - let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); - let block_hash = - eth_client.get_last_verified_block_hash().await.expect("issue while getting the last verified block hash"); - let expected = u256_to_felt(U256::from_str_radix(L2_BLOCK_HASH, 10).unwrap()).unwrap(); - assert_eq!(block_hash, expected, "latest block hash not matching"); - } - - #[tokio::test] - async fn get_last_state_root_works() { - let anvil = get_shared_anvil(); - let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); - let state_root = eth_client.get_last_state_root().await.expect("issue while getting the state root"); - let expected = u256_to_felt(U256::from_str_radix(L2_STATE_ROOT, 10).unwrap()).unwrap(); - assert_eq!(state_root, expected, "latest block state root not matching"); - } - - #[tokio::test] - async fn get_last_verified_block_number_works() { - let anvil = get_shared_anvil(); - let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); - let block_number = eth_client.get_last_verified_block_number().await.expect("issue"); - assert_eq!(block_number, L2_BLOCK_NUMBER, "verified block number not matching"); - } -} diff --git a/crates/madara/client/eth/src/l1_messaging.rs b/crates/madara/client/eth/src/l1_messaging.rs deleted file mode 100644 index 52b4af78e..000000000 --- a/crates/madara/client/eth/src/l1_messaging.rs +++ /dev/null @@ -1,582 +0,0 @@ -use crate::client::StarknetCoreContract::LogMessageToL2; -use 
crate::client::{EthereumClient, StarknetCoreContract}; -use crate::utils::u256_to_felt; -use alloy::eips::BlockNumberOrTag; -use alloy::primitives::{keccak256, FixedBytes, U256}; -use alloy::sol_types::SolValue; -use anyhow::Context; -use futures::StreamExt; -use mc_db::{l1_db::LastSyncedEventBlock, MadaraBackend}; -use mc_mempool::{Mempool, MempoolProvider}; -use mp_utils::service::ServiceContext; -use starknet_api::core::{ChainId, ContractAddress, EntryPointSelector, Nonce}; -use starknet_api::transaction::{Calldata, L1HandlerTransaction, TransactionVersion}; -use starknet_types_core::felt::Felt; -use std::sync::Arc; - -impl EthereumClient { - /// Get cancellation status of an L1 to L2 message - /// - /// This function query the core contract to know if a L1->L2 message has been cancelled - /// # Arguments - /// - /// - msg_hash : Hash of L1 to L2 message - /// - /// # Return - /// - /// - A felt representing a timestamp : - /// - 0 if the message has not been cancelled - /// - timestamp of the cancellation if it has been cancelled - /// - An Error if the call fail - pub async fn get_l1_to_l2_message_cancellations(&self, msg_hash: FixedBytes<32>) -> anyhow::Result { - //l1ToL2MessageCancellations - let cancellation_timestamp = self.l1_core_contract.l1ToL2MessageCancellations(msg_hash).call().await?; - u256_to_felt(cancellation_timestamp._0) - } -} - -pub async fn sync( - backend: Arc, - client: Arc, - chain_id: ChainId, - mempool: Arc, - mut ctx: ServiceContext, -) -> anyhow::Result<()> { - tracing::info!("⟠ Starting L1 Messages Syncing..."); - - let last_synced_event_block = match backend.messaging_last_synced_l1_block_with_event() { - Ok(Some(blk)) => blk, - Ok(None) => { - unreachable!("Should never be None") - } - Err(e) => { - tracing::error!("⟠ Madara Messaging DB unavailable: {:?}", e); - return Err(e.into()); - } - }; - let event_filter = client.l1_core_contract.event_filter::(); - - let mut event_stream = event_filter - 
.from_block(last_synced_event_block.block_number) - .to_block(BlockNumberOrTag::Finalized) - .watch() - .await - .context( - "Failed to watch event filter - Ensure you are using an L1 RPC endpoint that points to an archive node", - )? - .into_stream(); - - while let Some(Some(event_result)) = ctx.run_until_cancelled(event_stream.next()).await { - if let Ok((event, meta)) = event_result { - tracing::info!( - "⟠ Processing L1 Message from block: {:?}, transaction_hash: {:?}, log_index: {:?}, fromAddress: {:?}", - meta.block_number, - meta.transaction_hash, - meta.log_index, - event.fromAddress - ); - - // Check if cancellation was initiated - let event_hash = get_l1_to_l2_msg_hash(&event)?; - tracing::info!("⟠ Checking for cancelation, event hash : {:?}", event_hash); - let cancellation_timestamp = client.get_l1_to_l2_message_cancellations(event_hash).await?; - if cancellation_timestamp != Felt::ZERO { - tracing::info!("⟠ L1 Message was cancelled in block at timestamp : {:?}", cancellation_timestamp); - let tx_nonce = Nonce(u256_to_felt(event.nonce)?); - // cancelled message nonce should be inserted to avoid reprocessing - match backend.has_l1_messaging_nonce(tx_nonce) { - Ok(false) => { - backend.set_l1_messaging_nonce(tx_nonce)?; - } - Ok(true) => {} - Err(e) => { - tracing::error!("⟠ Unexpected DB error: {:?}", e); - return Err(e.into()); - } - }; - continue; - } - - match process_l1_message(&backend, &event, &meta.block_number, &meta.log_index, &chain_id, mempool.clone()) - .await - { - Ok(Some(tx_hash)) => { - tracing::info!( - "⟠ L1 Message from block: {:?}, transaction_hash: {:?}, log_index: {:?} submitted, \ - transaction hash on L2: {:?}", - meta.block_number, - meta.transaction_hash, - meta.log_index, - tx_hash - ); - } - Ok(None) => {} - Err(e) => { - tracing::error!( - "⟠ Unexpected error while processing L1 Message from block: {:?}, transaction_hash: {:?}, \ - log_index: {:?}, error: {:?}", - meta.block_number, - meta.transaction_hash, - meta.log_index, 
- e - ) - } - } - } - } - - Ok(()) -} - -async fn process_l1_message( - backend: &MadaraBackend, - event: &LogMessageToL2, - l1_block_number: &Option, - event_index: &Option, - _chain_id: &ChainId, - mempool: Arc, -) -> anyhow::Result> { - let transaction = parse_handle_l1_message_transaction(event)?; - let tx_nonce = transaction.nonce; - let fees: u128 = event.fee.try_into()?; - - // Ensure that L1 message has not been executed - match backend.has_l1_messaging_nonce(tx_nonce) { - Ok(false) => { - backend.set_l1_messaging_nonce(tx_nonce)?; - } - Ok(true) => { - tracing::debug!("⟠ Event already processed: {:?}", transaction); - return Ok(None); - } - Err(e) => { - tracing::error!("⟠ Unexpected DB error: {:?}", e); - return Err(e.into()); - } - }; - - let res = mempool.tx_accept_l1_handler(transaction.into(), fees)?; - - // TODO: remove unwraps - // Ques: shall it panic if no block number of event_index? - let block_sent = LastSyncedEventBlock::new(l1_block_number.unwrap(), event_index.unwrap()); - backend.messaging_update_last_synced_l1_block_with_event(block_sent)?; - - Ok(Some(res.transaction_hash)) -} - -pub fn parse_handle_l1_message_transaction(event: &LogMessageToL2) -> anyhow::Result { - // L1 from address. - let from_address = u256_to_felt(event.fromAddress.into_word().into())?; - - // L2 contract to call. - let contract_address = u256_to_felt(event.toAddress)?; - - // Function of the contract to call. - let entry_point_selector = u256_to_felt(event.selector)?; - - // L1 message nonce. 
- let nonce = u256_to_felt(event.nonce)?; - - let event_payload = event.payload.clone().into_iter().map(u256_to_felt).collect::>>()?; - - let calldata: Calldata = { - let mut calldata: Vec<_> = Vec::with_capacity(event.payload.len() + 1); - calldata.push(from_address); - calldata.extend(event_payload); - - Calldata(Arc::new(calldata)) - }; - - Ok(L1HandlerTransaction { - nonce: Nonce(nonce), - contract_address: ContractAddress(contract_address.try_into()?), - entry_point_selector: EntryPointSelector(entry_point_selector), - calldata, - version: TransactionVersion(Felt::ZERO), - }) -} - -/// Computes the message hashed with the given event data -fn get_l1_to_l2_msg_hash(event: &LogMessageToL2) -> anyhow::Result> { - let data = ( - [0u8; 12], - event.fromAddress.0 .0, - event.toAddress, - event.nonce, - event.selector, - U256::from(event.payload.len()), - event.payload.clone(), - ); - Ok(keccak256(data.abi_encode_packed())) -} - -#[cfg(test)] -mod l1_messaging_tests { - - use std::{sync::Arc, time::Duration}; - - use crate::l1_messaging::sync; - use crate::{ - client::{ - EthereumClient, L1BlockMetrics, - StarknetCoreContract::{self, LogMessageToL2}, - }, - l1_messaging::get_l1_to_l2_msg_hash, - utils::felt_to_u256, - }; - use alloy::{ - hex::FromHex, - node_bindings::{Anvil, AnvilInstance}, - primitives::{Address, U256}, - providers::{ProviderBuilder, RootProvider}, - sol, - transports::http::{Client, Http}, - }; - use mc_db::DatabaseService; - use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; - use mp_chain_config::ChainConfig; - use mp_utils::service::ServiceContext; - use rstest::*; - use starknet_api::core::Nonce; - use starknet_types_core::felt::Felt; - use tempfile::TempDir; - use tracing_test::traced_test; - use url::Url; - - use self::DummyContract::DummyContractInstance; - - struct TestRunner { - #[allow(dead_code)] - anvil: AnvilInstance, // Not used but needs to stay in scope otherwise it will be dropped - chain_config: Arc, - 
db_service: Arc, - dummy_contract: DummyContractInstance, RootProvider>>, - eth_client: EthereumClient, - mempool: Arc, - } - - // LogMessageToL2 from https://etherscan.io/tx/0x21980d6674d33e50deee43c6c30ef3b439bd148249b4539ce37b7856ac46b843 - // bytecode is compiled DummyContractBasicTestCase - sol!( - #[derive(Debug)] - #[sol(rpc, bytecode="6080604052348015600e575f80fd5b506108258061001c5f395ff3fe608060405234801561000f575f80fd5b506004361061004a575f3560e01c80634185df151461004e57806390985ef9146100585780639be446bf14610076578063af56443a146100a6575b5f80fd5b6100566100c2565b005b61006061013b565b60405161006d9190610488565b60405180910390f35b610090600480360381019061008b91906104cf565b6101ac565b60405161009d9190610512565b60405180910390f35b6100c060048036038101906100bb9190610560565b6101d8565b005b5f6100cb6101f3565b905080604001518160200151825f015173ffffffffffffffffffffffffffffffffffffffff167fdb80dd488acf86d17c747445b0eabb5d57c541d3bd7b6b87af987858e5066b2b846060015185608001518660a0015160405161013093929190610642565b60405180910390a450565b5f806101456101f3565b9050805f015173ffffffffffffffffffffffffffffffffffffffff1681602001518260800151836040015184606001515185606001516040516020016101909695949392919061072a565b6040516020818303038152906040528051906020012091505090565b5f805f9054906101000a900460ff166101c5575f6101cb565b6366b4f1055b63ffffffff169050919050565b805f806101000a81548160ff02191690831515021790555050565b6101fb610429565b5f73ae0ee0a63a2ce6baeeffe56e7714fb4efe48d41990505f7f073314940630fd6dcda0d772d4c972c4e0a9946bef9dabf4ef84eda8ef542b8290505f7f01b64b1b3b690b43b9b514fb81377518f4039cd3e4f4914d8a6bdf01d679fb1990505f600767ffffffffffffffff81111561027757610276610795565b5b6040519080825280602002602001820160405280156102a55781602001602082028036833780820191505090505b5090506060815f815181106102bd576102bc6107c2565b5b60200260200101818152505062195091816001815181106102e1576102e06107c2565b5b60200260200101818152505065231594f0c7ea81600281518110610308576103076107c2565b5b602002602001018181525050600581600381518110610
32a576103296107c2565b5b602002602001018181525050624554488160048151811061034e5761034d6107c2565b5b60200260200101818152505073bdb193c166cfb7be2e51711c5648ebeef94063bb81600581518110610383576103826107c2565b5b6020026020010181815250507e7d79cd86ba27a2508a9ca55c8b3474ca082bc5173d0467824f07a32e9db888816006815181106103c3576103c26107c2565b5b6020026020010181815250505f662386f26fc1000090505f6040518060c001604052808773ffffffffffffffffffffffffffffffffffffffff16815260200186815260200185815260200184815260200183815260200182815250965050505050505090565b6040518060c001604052805f73ffffffffffffffffffffffffffffffffffffffff1681526020015f81526020015f8152602001606081526020015f81526020015f81525090565b5f819050919050565b61048281610470565b82525050565b5f60208201905061049b5f830184610479565b92915050565b5f80fd5b6104ae81610470565b81146104b8575f80fd5b50565b5f813590506104c9816104a5565b92915050565b5f602082840312156104e4576104e36104a1565b5b5f6104f1848285016104bb565b91505092915050565b5f819050919050565b61050c816104fa565b82525050565b5f6020820190506105255f830184610503565b92915050565b5f8115159050919050565b61053f8161052b565b8114610549575f80fd5b50565b5f8135905061055a81610536565b92915050565b5f60208284031215610575576105746104a1565b5b5f6105828482850161054c565b91505092915050565b5f81519050919050565b5f82825260208201905092915050565b5f819050602082019050919050565b6105bd816104fa565b82525050565b5f6105ce83836105b4565b60208301905092915050565b5f602082019050919050565b5f6105f08261058b565b6105fa8185610595565b9350610605836105a5565b805f5b8381101561063557815161061c88826105c3565b9750610627836105da565b925050600181019050610608565b5085935050505092915050565b5f6060820190508181035f83015261065a81866105e6565b90506106696020830185610503565b6106766040830184610503565b949350505050565b5f819050919050565b610698610693826104fa565b61067e565b82525050565b5f81905092915050565b6106b1816104fa565b82525050565b5f6106c283836106a8565b60208301905092915050565b5f6106d88261058b565b6106e2818561069e565b93506106ed836105a5565b805f5b8381101561071d57815161070488826106b7565b97506
1070f836105da565b9250506001810190506106f0565b5085935050505092915050565b5f6107358289610687565b6020820191506107458288610687565b6020820191506107558287610687565b6020820191506107658286610687565b6020820191506107758285610687565b60208201915061078582846106ce565b9150819050979650505050505050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffdfea2646970667358221220ddc41ccc2cc8b33e1f608fb6cabf9ead1150daa8798e94e03ce9cd61e0d9389164736f6c634300081a0033")] - contract DummyContract { - bool isCanceled; - event LogMessageToL2(address indexed _fromAddress, uint256 indexed _toAddress, uint256 indexed _selector, uint256[] payload, uint256 nonce, uint256 fee); - - struct MessageData { - address fromAddress; - uint256 toAddress; - uint256 selector; - uint256[] payload; - uint256 nonce; - uint256 fee; - } - - function getMessageData() internal pure returns (MessageData memory) { - address fromAddress = address(993696174272377493693496825928908586134624850969); - uint256 toAddress = 3256441166037631918262930812410838598500200462657642943867372734773841898370; - uint256 selector = 774397379524139446221206168840917193112228400237242521560346153613428128537; - uint256[] memory payload = new uint256[](7); - payload[0] = 96; - payload[1] = 1659025; - payload[2] = 38575600093162; - payload[3] = 5; - payload[4] = 4543560; - payload[5] = 1082959358903034162641917759097118582889062097851; - payload[6] = 221696535382753200248526706088340988821219073423817576256483558730535647368; - uint256 nonce = 10000000000000000; - uint256 fee = 0; - - return MessageData(fromAddress, toAddress, selector, payload, nonce, fee); - } - - function fireEvent() public { - MessageData memory data = getMessageData(); - emit LogMessageToL2(data.fromAddress, data.toAddress, data.selector, data.payload, data.nonce, data.fee); - } - - function l1ToL2MessageCancellations(bytes32 msgHash) external 
view returns (uint256) { - return isCanceled ? 1723134213 : 0; - } - - function setIsCanceled(bool value) public { - isCanceled = value; - } - - function getL1ToL2MsgHash() external pure returns (bytes32) { - MessageData memory data = getMessageData(); - return keccak256( - abi.encodePacked( - uint256(uint160(data.fromAddress)), - data.toAddress, - data.nonce, - data.selector, - data.payload.length, - data.payload - ) - ); - } - } - ); - - /// Common setup for tests - /// - /// This test performs the following steps: - /// 1. Sets up test environemment - /// 2. Starts worker - /// 3. Fires a Message event from the dummy contract - /// 4. Waits for event to be processed - /// 5. Assert that the worker handle the event with correct data - /// 6. Assert that the hash computed by the worker is correct - /// 7. TODO : Assert that the tx is succesfully submited to the mempool - /// 8. Assert that the event is successfully pushed to the db - /// 9. TODO : Assert that the tx was correctly executed - #[fixture] - async fn setup_test_env() -> TestRunner { - // Start Anvil instance - let anvil = Anvil::new().block_time(1).chain_id(1337).try_spawn().expect("failed to spawn anvil instance"); - println!("Anvil started and running at `{}`", anvil.endpoint()); - - // Set up chain info - let chain_config = Arc::new(ChainConfig::madara_test()); - - // Set up database paths - let temp_dir = TempDir::new().expect("issue while creating temporary directory"); - let base_path = temp_dir.path().join("data"); - let backup_dir = Some(temp_dir.path().join("backups")); - - // Initialize database service - let db = Arc::new( - DatabaseService::new(&base_path, backup_dir, false, chain_config.clone(), Default::default()) - .await - .expect("Failed to create database service"), - ); - - let l1_gas_setter = GasPriceProvider::new(); - let l1_data_provider: Arc = Arc::new(l1_gas_setter.clone()); - - let mempool = Arc::new(Mempool::new( - Arc::clone(db.backend()), - Arc::clone(&l1_data_provider), - 
MempoolLimits::for_testing(), - )); - - // Set up metrics service - let l1_block_metrics = L1BlockMetrics::register().unwrap(); - - // Set up provider - let rpc_url: Url = anvil.endpoint().parse().expect("issue while parsing"); - let provider = ProviderBuilder::new().on_http(rpc_url); - - // Set up dummy contract - let contract = DummyContract::deploy(provider.clone()).await.unwrap(); - - let core_contract = StarknetCoreContract::new(*contract.address(), provider.clone()); - - let eth_client = EthereumClient { - provider: Arc::new(provider.clone()), - l1_core_contract: core_contract.clone(), - l1_block_metrics: l1_block_metrics.clone(), - }; - - TestRunner { anvil, chain_config, db_service: db, dummy_contract: contract, eth_client, mempool } - } - - /// Test the basic workflow of l1 -> l2 messaging - /// - /// This test performs the following steps: - /// 1. Sets up test environemment - /// 2. Starts worker - /// 3. Fires a Message event from the dummy contract - /// 4. Waits for event to be processed - /// 5. Assert that the worker handle the event with correct data - /// 6. Assert that the hash computed by the worker is correct - /// 7. TODO : Assert that the tx is succesfully submited to the mempool - /// 8. Assert that the event is successfully pushed to the db - /// 9. 
TODO : Assert that the tx was correctly executed - #[rstest] - #[traced_test] - #[tokio::test] - async fn e2e_test_basic_workflow(#[future] setup_test_env: TestRunner) { - let TestRunner { chain_config, db_service: db, dummy_contract: contract, eth_client, anvil: _anvil, mempool } = - setup_test_env.await; - - // Start worker - let worker_handle = { - let db = Arc::clone(&db); - tokio::spawn(async move { - sync( - Arc::clone(db.backend()), - Arc::new(eth_client), - chain_config.chain_id.clone(), - mempool, - ServiceContext::new_for_testing(), - ) - .await - }) - }; - - let _ = contract.setIsCanceled(false).send().await; - // Send a Event and wait for processing, Panic if fail - let _ = contract.fireEvent().send().await.expect("Failed to fire event"); - tokio::time::sleep(Duration::from_secs(5)).await; - - // Assert that event was caught by the worker with correct data - // TODO: Maybe add some more assert - assert!(logs_contain("fromAddress: 0xae0ee0a63a2ce6baeeffe56e7714fb4efe48d419")); - - // Assert the tx hash computed by the worker is correct - assert!(logs_contain( - format!("event hash : {:?}", contract.getL1ToL2MsgHash().call().await.expect("failed to get hash")._0) - .as_str() - )); - - // TODO : Assert that the tx has been included in the mempool - - // Assert that the event is well stored in db - let last_block = - db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); - assert_ne!(last_block.block_number, 0); - let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); - assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); - // TODO : Assert that the tx was correctly executed - - // Explicitly cancel the listen task, else it would be running in the background - worker_handle.abort(); - } - - /// Test the workflow of l1 -> l2 messaging with duplicate event - /// - /// This test performs the following steps: - /// 1. Sets up test environemment - /// 2. 
Starts worker - /// 3. Fires a Message event from the dummy contract - /// 4. Waits for event to be processed - /// 5. Assert that the event is well stored in db - /// 6. Fires a Message with the same event from the dummy contract - /// 7. Assert that the last event stored is the first one - #[rstest] - #[traced_test] - #[tokio::test] - async fn e2e_test_already_processed_event(#[future] setup_test_env: TestRunner) { - let TestRunner { chain_config, db_service: db, dummy_contract: contract, eth_client, anvil: _anvil, mempool } = - setup_test_env.await; - - // Start worker - let worker_handle = { - let db = Arc::clone(&db); - tokio::spawn(async move { - sync( - Arc::clone(db.backend()), - Arc::new(eth_client), - chain_config.chain_id.clone(), - mempool, - ServiceContext::new_for_testing(), - ) - .await - }) - }; - - let _ = contract.setIsCanceled(false).send().await; - let _ = contract.fireEvent().send().await.expect("Failed to fire event"); - tokio::time::sleep(Duration::from_secs(5)).await; - let last_block = - db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); - assert_ne!(last_block.block_number, 0); - let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); - assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); - - // Send the event a second time - let _ = contract.fireEvent().send().await.expect("Failed to fire event"); - tokio::time::sleep(Duration::from_secs(5)).await; - // Assert that the last event in db is still the same as it is already processed (same nonce) - assert_eq!( - last_block.block_number, - db.backend() - .messaging_last_synced_l1_block_with_event() - .expect("failed to retrieve block") - .unwrap() - .block_number - ); - assert!(logs_contain("Event already processed")); - - worker_handle.abort(); - } - - /// Test the workflow of l1 -> l2 messaging with message cancelled - /// - /// This test performs the following steps: - /// 1. 
Sets up test environemment - /// 2. Starts worker - /// 3. Fires a Message event from the dummy contract - /// 4. Waits for event to be processed - /// 5. Assert that the event is not stored in db - #[rstest] - #[traced_test] - #[tokio::test] - async fn e2e_test_message_canceled(#[future] setup_test_env: TestRunner) { - let TestRunner { chain_config, db_service: db, dummy_contract: contract, eth_client, anvil: _anvil, mempool } = - setup_test_env.await; - - // Start worker - let worker_handle = { - let db = Arc::clone(&db); - tokio::spawn(async move { - sync( - Arc::clone(db.backend()), - Arc::new(eth_client), - chain_config.chain_id.clone(), - mempool, - ServiceContext::new_for_testing(), - ) - .await - }) - }; - - // Mock cancelled message - let _ = contract.setIsCanceled(true).send().await; - let _ = contract.fireEvent().send().await.expect("Failed to fire event"); - tokio::time::sleep(Duration::from_secs(5)).await; - let last_block = - db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); - assert_eq!(last_block.block_number, 0); - let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); - // cancelled message nonce should be inserted to avoid reprocessing - assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); - assert!(logs_contain("L1 Message was cancelled in block at timestamp : 0x66b4f105")); - - worker_handle.abort(); - } - - /// Test taken from starknet.rs to ensure consistency - /// https://github.com/xJonathanLEI/starknet-rs/blob/2ddc69479d326ed154df438d22f2d720fbba746e/starknet-core/src/types/msg.rs#L96 - #[test] - fn test_msg_to_l2_hash() { - let msg = get_l1_to_l2_msg_hash(&LogMessageToL2 { - fromAddress: Address::from_hex("c3511006C04EF1d78af4C8E0e74Ec18A6E64Ff9e").unwrap(), - toAddress: felt_to_u256( - Felt::from_hex("0x73314940630fd6dcda0d772d4c972c4e0a9946bef9dabf4ef84eda8ef542b82").unwrap(), - ), - selector: felt_to_u256( - 
Felt::from_hex("0x2d757788a8d8d6f21d1cd40bce38a8222d70654214e96ff95d8086e684fbee5").unwrap(), - ), - payload: vec![ - felt_to_u256( - Felt::from_hex("0x689ead7d814e51ed93644bc145f0754839b8dcb340027ce0c30953f38f55d7").unwrap(), - ), - felt_to_u256(Felt::from_hex("0x2c68af0bb140000").unwrap()), - felt_to_u256(Felt::from_hex("0x0").unwrap()), - ], - nonce: U256::from(775628), - fee: U256::ZERO, - }) - .expect("Failed to compute l1 to l2 msg hash"); - - let expected_hash = - <[u8; 32]>::from_hex("c51a543ef9563ad2545342b390b67edfcddf9886aa36846cf70382362fc5fab3").unwrap(); - - assert_eq!(msg.0, expected_hash); - } -} diff --git a/crates/madara/client/eth/src/state_update.rs b/crates/madara/client/eth/src/state_update.rs deleted file mode 100644 index 2164f9bf4..000000000 --- a/crates/madara/client/eth/src/state_update.rs +++ /dev/null @@ -1,189 +0,0 @@ -use std::sync::Arc; - -use crate::client::{L1BlockMetrics, StarknetCoreContract}; -use crate::{client::EthereumClient, utils::convert_log_state_update}; -use anyhow::Context; -use futures::StreamExt; -use mc_db::MadaraBackend; -use mp_utils::service::ServiceContext; -use mp_utils::trim_hash; -use serde::Deserialize; -use starknet_types_core::felt::Felt; - -const ERR_ARCHIVE: &str = - "Failed to watch event filter - Ensure you are using an L1 RPC endpoint that points to an archive node"; - -#[derive(Debug, Clone, Deserialize, PartialEq)] -pub struct L1StateUpdate { - pub block_number: u64, - pub global_root: Felt, - pub block_hash: Felt, -} - -/// Get the last Starknet state update verified on the L1 -pub async fn get_initial_state(client: &EthereumClient) -> anyhow::Result { - let block_number = client.get_last_verified_block_number().await?; - let block_hash = client.get_last_verified_block_hash().await?; - let global_root = client.get_last_state_root().await?; - - Ok(L1StateUpdate { global_root, block_number, block_hash }) -} - -pub fn update_l1( - backend: &MadaraBackend, - state_update: L1StateUpdate, - 
block_metrics: &L1BlockMetrics, -) -> anyhow::Result<()> { - tracing::info!( - "🔄 Updated L1 head #{} ({}) with state root ({})", - state_update.block_number, - trim_hash(&state_update.block_hash), - trim_hash(&state_update.global_root) - ); - - block_metrics.l1_block_number.record(state_update.block_number, &[]); - - backend.write_last_confirmed_block(state_update.block_number).context("Setting l1 last confirmed block number")?; - tracing::debug!("update_l1: wrote last confirmed block number"); - - Ok(()) -} - -pub async fn state_update_worker( - backend: Arc, - eth_client: Arc, - mut ctx: ServiceContext, -) -> anyhow::Result<()> { - // Clear L1 confirmed block at startup - backend.clear_last_confirmed_block().context("Clearing l1 last confirmed block number")?; - tracing::debug!("update_l1: cleared confirmed block number"); - - tracing::info!("🚀 Subscribed to L1 state verification"); - // This does not seem to play well with anvil - #[cfg(not(test))] - { - let initial_state = get_initial_state(ð_client).await.context("Getting initial ethereum state")?; - update_l1(&backend, initial_state, ð_client.l1_block_metrics)?; - } - - // Listen to LogStateUpdate (0x77552641) update and send changes continuously - let event_filter = eth_client.l1_core_contract.event_filter::(); - - let mut event_stream = match ctx.run_until_cancelled(event_filter.watch()).await { - Some(res) => res.context(ERR_ARCHIVE)?.into_stream(), - None => return anyhow::Ok(()), - }; - - while let Some(Some(event_result)) = ctx.run_until_cancelled(event_stream.next()).await { - let log = event_result.context("listening for events")?; - let format_event: L1StateUpdate = - convert_log_state_update(log.0.clone()).context("formatting event into an L1StateUpdate")?; - update_l1(&backend, format_event, ð_client.l1_block_metrics)?; - } - - anyhow::Ok(()) -} - -#[cfg(test)] -mod eth_client_event_subscription_test { - use super::*; - use std::{sync::Arc, time::Duration}; - - use alloy::{node_bindings::Anvil, 
providers::ProviderBuilder, sol}; - use mc_db::DatabaseService; - use mp_chain_config::ChainConfig; - use rstest::*; - use tempfile::TempDir; - use url::Url; - - sol!( - #[sol(rpc, bytecode="6080604052348015600e575f80fd5b506101618061001c5f395ff3fe608060405234801561000f575f80fd5b5060043610610029575f3560e01c80634185df151461002d575b5f80fd5b610035610037565b005b5f7f0639349b21e886487cd6b341de2050db8ab202d9c6b0e7a2666d598e5fcf81a690505f620a1caf90505f7f0279b69383ea92624c1ae4378ac7fae6428f47bbd21047ea0290c3653064188590507fd342ddf7a308dec111745b00315c14b7efb2bdae570a6856e088ed0c65a3576c8383836040516100b9939291906100f6565b60405180910390a1505050565b5f819050919050565b6100d8816100c6565b82525050565b5f819050919050565b6100f0816100de565b82525050565b5f6060820190506101095f8301866100cf565b61011660208301856100e7565b61012360408301846100cf565b94935050505056fea2646970667358221220fbc6fd165c86ed9af0c5fcab2830d4a72894fd6a98e9c16dbf9101c4c22e2f7d64736f6c634300081a0033")] - contract DummyContract { - event LogStateUpdate(uint256 globalRoot, int256 blockNumber, uint256 blockHash); - - function fireEvent() public { - uint256 globalRoot = 2814950447364693428789615812443623689251959344851195711990387747563915674022; - int256 blockNumber = 662703; - uint256 blockHash = 1119674286844400689540394420005977072742999649767515920196535047615668295813; - - emit LogStateUpdate(globalRoot, blockNumber, blockHash); - } - } - ); - - const L2_BLOCK_NUMBER: u64 = 662703; - const ANOTHER_ANVIL_PORT: u16 = 8548; - const EVENT_PROCESSING_TIME: u64 = 2; // Time to allow for event processing in seconds - - /// Test the event subscription and state update functionality - /// - /// This test performs the following steps: - /// 1. Sets up a mock Ethereum environment using Anvil - /// 2. Initializes necessary services (Database, Metrics) - /// 3. Deploys a dummy contract and sets up an Ethereum client - /// 4. Starts listening for state updates - /// 5. Fires an event from the dummy contract - /// 6. 
Waits for event processing and verifies the block number - #[rstest] - #[tokio::test] - async fn listen_and_update_state_when_event_fired_works() { - // Start Anvil instance - let anvil = Anvil::new() - .block_time(1) - .chain_id(1337) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("failed to spawn anvil instance"); - println!("Anvil started and running at `{}`", anvil.endpoint()); - - // Set up chain info - let chain_info = Arc::new(ChainConfig::madara_test()); - - // Set up database paths - let temp_dir = TempDir::new().expect("issue while creating temporary directory"); - let base_path = temp_dir.path().join("data"); - let backup_dir = Some(temp_dir.path().join("backups")); - - // Initialize database service - let db = Arc::new( - DatabaseService::new(&base_path, backup_dir, false, chain_info.clone(), Default::default()) - .await - .expect("Failed to create database service"), - ); - - // Set up metrics service - let l1_block_metrics = L1BlockMetrics::register().unwrap(); - - let rpc_url: Url = anvil.endpoint().parse().expect("issue while parsing"); - let provider = ProviderBuilder::new().on_http(rpc_url); - - let contract = DummyContract::deploy(provider.clone()).await.unwrap(); - let core_contract = StarknetCoreContract::new(*contract.address(), provider.clone()); - - let eth_client = - EthereumClient { provider: Arc::new(provider), l1_core_contract: core_contract.clone(), l1_block_metrics }; - - // Start listening for state updates - let listen_handle = { - let db = Arc::clone(&db); - tokio::spawn(async move { - state_update_worker(Arc::clone(db.backend()), Arc::new(eth_client), ServiceContext::new_for_testing()) - .await - .unwrap() - }) - }; - - let _ = contract.fireEvent().send().await.expect("Failed to fire event"); - - // Wait for event processing - tokio::time::sleep(Duration::from_secs(EVENT_PROCESSING_TIME)).await; - - // Verify the block number - let block_in_db = - db.backend().get_l1_last_confirmed_block().expect("Failed to get L1 last 
confirmed block number"); - - // Explicitly cancel the listen task, else it would be running in the background - listen_handle.abort(); - assert_eq!(block_in_db, Some(L2_BLOCK_NUMBER), "Block in DB does not match expected L2 block number"); - } -} diff --git a/crates/madara/client/eth/src/sync.rs b/crates/madara/client/eth/src/sync.rs deleted file mode 100644 index 2a607fcde..000000000 --- a/crates/madara/client/eth/src/sync.rs +++ /dev/null @@ -1,38 +0,0 @@ -use crate::client::EthereumClient; -use crate::l1_gas_price::gas_price_worker; -use crate::l1_messaging::sync; -use crate::state_update::state_update_worker; -use mc_mempool::{GasPriceProvider, Mempool}; -use mp_utils::service::ServiceContext; -use starknet_api::core::ChainId; -use std::sync::Arc; -use std::time::Duration; - -use mc_db::MadaraBackend; - -#[allow(clippy::too_many_arguments)] -pub async fn l1_sync_worker( - backend: Arc, - eth_client: Arc, - chain_id: ChainId, - l1_gas_provider: GasPriceProvider, - gas_price_sync_disabled: bool, - gas_price_poll_ms: Duration, - mempool: Arc, - ctx: ServiceContext, -) -> anyhow::Result<()> { - let mut join_set = tokio::task::JoinSet::new(); - - join_set.spawn(state_update_worker(Arc::clone(&backend), Arc::clone(ð_client), ctx.clone())); - join_set.spawn(sync(Arc::clone(&backend), Arc::clone(ð_client), chain_id, mempool, ctx.clone())); - - if !gas_price_sync_disabled { - join_set.spawn(gas_price_worker(Arc::clone(ð_client), l1_gas_provider, gas_price_poll_ms, ctx.clone())); - } - - while let Some(res) = join_set.join_next().await { - res??; - } - - Ok(()) -} diff --git a/crates/madara/client/eth/Cargo.toml b/crates/madara/client/settlement_client/Cargo.toml similarity index 80% rename from crates/madara/client/eth/Cargo.toml rename to crates/madara/client/settlement_client/Cargo.toml index c944fdbb2..7590c89de 100644 --- a/crates/madara/client/eth/Cargo.toml +++ b/crates/madara/client/settlement_client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "This crate 
is responsible to handle l1 communication" -name = "mc-eth" +name = "mc-settlement-client" authors.workspace = true edition.workspace = true license.workspace = true @@ -35,13 +35,20 @@ starknet_api.workspace = true # Other alloy.workspace = true anyhow.workspace = true +assert_matches = "1.5.0" bigdecimal.workspace = true bitvec.workspace = true futures = { workspace = true, default-features = true } - +mockall.workspace = true regex = "1.10.5" serde = { workspace = true, default-features = true } serde_json = "1" +starknet-accounts = "0.11.0" +starknet-contract = "0.11.0" +starknet-core = { workspace = true } +starknet-crypto = { workspace = true } +starknet-providers = { workspace = true } +starknet-signers = { workspace = true } thiserror.workspace = true time = "0.3.36" tokio = { workspace = true, features = [ @@ -54,6 +61,10 @@ url.workspace = true #Instrumentation +async-trait = { workspace = true } +hex = "0.4.3" +log = "0.4.22" +matchers = "0.1.0" opentelemetry = { workspace = true, features = ["metrics", "logs"] } opentelemetry-appender-tracing = { workspace = true, default-features = false } opentelemetry-otlp = { workspace = true, features = [ @@ -64,6 +75,7 @@ opentelemetry-otlp = { workspace = true, features = [ opentelemetry-semantic-conventions = { workspace = true } opentelemetry-stdout = { workspace = true } opentelemetry_sdk = { workspace = true, features = ["rt-tokio", "logs"] } +reqwest = "0.11.27" tracing = { workspace = true } tracing-core = { workspace = true, default-features = false } tracing-opentelemetry = { workspace = true } @@ -73,14 +85,16 @@ tracing-subscriber = { workspace = true, features = ["env-filter"] } [features] default = [] - [dev-dependencies] rstest.workspace = true once_cell.workspace = true tempfile.workspace = true dotenv.workspace = true httpmock.workspace = true +serial_test.workspace = true tracing-test = "0.2.5" lazy_static.workspace = true mp-utils = { workspace = true, features = ["testing"] } mc-mempool = { 
workspace = true, features = ["testing"] } +# Compile the test contracts in test cfg. +m-cairo-test-contracts.workspace = true diff --git a/crates/madara/client/eth/README.md b/crates/madara/client/settlement_client/README.md similarity index 100% rename from crates/madara/client/eth/README.md rename to crates/madara/client/settlement_client/README.md diff --git a/crates/madara/client/settlement_client/src/client.rs b/crates/madara/client/settlement_client/src/client.rs new file mode 100644 index 000000000..94d7cce00 --- /dev/null +++ b/crates/madara/client/settlement_client/src/client.rs @@ -0,0 +1,113 @@ +use crate::gas_price::L1BlockMetrics; +use crate::messaging::CommonMessagingEventData; +use crate::state_update::StateUpdate; +use async_trait::async_trait; +use futures::stream::BoxStream; +use futures::Stream; +use mc_db::l1_db::LastSyncedEventBlock; +use mc_db::MadaraBackend; +use mockall::automock; +use mp_utils::service::ServiceContext; +use starknet_types_core::felt::Felt; +use std::sync::Arc; + +pub enum ClientType { + ETH, + STARKNET, +} + +#[derive(Debug, Default, PartialEq)] +pub struct DummyConfig; +pub type DummyStream = BoxStream<'static, Option>>; + +#[automock( + type Config = DummyConfig; + type StreamType = DummyStream; +)] +#[async_trait] +pub trait ClientTrait: Send + Sync { + // Configuration type used for initialization + type Config; + + // Get client type + fn get_client_type(&self) -> ClientType; + + // Create a new instance of the client + async fn new(config: Self::Config) -> anyhow::Result + where + Self: Sized; + + // Get the latest block number + async fn get_latest_block_number(&self) -> anyhow::Result; + + // Get the block number of the last occurrence of the state update event + async fn get_last_event_block_number(&self) -> anyhow::Result; + + // Get the last verified block number + async fn get_last_verified_block_number(&self) -> anyhow::Result; + + // Get the last state root + // - change this to Felt in implementation + // - 
write tests for conversion to Felt from + async fn get_last_verified_state_root(&self) -> anyhow::Result; + + // Get the last verified block hash + async fn get_last_verified_block_hash(&self) -> anyhow::Result; + + // Get initial state from client + async fn get_initial_state(&self) -> anyhow::Result; + + // Listen for update state events + async fn listen_for_update_state_events( + &self, + backend: Arc, + ctx: ServiceContext, + l1_block_metrics: Arc, + ) -> anyhow::Result<()>; + + // get gas prices + async fn get_gas_prices(&self) -> anyhow::Result<(u128, u128)>; + + // Get message hash from event + fn get_messaging_hash(&self, event: &CommonMessagingEventData) -> anyhow::Result>; + + /// Get cancellation status of an L1 to L2 message + /// + /// This function query the core contract to know if a L1->L2 message has been cancelled + /// # Arguments + /// + /// - msg_hash : Hash of L1 to L2 message + /// + /// # Return + /// + /// - A felt representing a timestamp : + /// - 0 if the message has not been cancelled + /// - timestamp of the cancellation if it has been cancelled + /// - An Error if the call fail + async fn get_l1_to_l2_message_cancellations(&self, msg_hash: Vec) -> anyhow::Result; + + // ============================================================ + // Stream Implementations : + // ============================================================ + + /// The type of Stream that will be returned by get_messaging_stream + /// - Stream: Represents an asynchronous sequence of values + /// - Item: Each element in the stream is wrapped in Option to handle potential gaps + /// - anyhow::Result: Each item is further wrapped in Result for error handling + /// - CommonMessagingEventData: The actual message data structure being streamed + type StreamType: Stream>> + Send; + + /// Retrieves a stream of messaging events starting from the last synced block + /// + /// # Arguments + /// * `last_synced_event_block` - Contains information about the last block that was + 
/// successfully processed, used as starting point for the new stream + /// + /// # Returns + /// * `anyhow::Result` - Returns the stream if successful, or an error + /// if stream creation fails + async fn get_messaging_stream( + &self, + last_synced_event_block: LastSyncedEventBlock, + ) -> anyhow::Result; +} diff --git a/crates/madara/client/eth/src/error.rs b/crates/madara/client/settlement_client/src/error.rs similarity index 100% rename from crates/madara/client/eth/src/error.rs rename to crates/madara/client/settlement_client/src/error.rs diff --git a/crates/madara/client/settlement_client/src/eth/event.rs b/crates/madara/client/settlement_client/src/eth/event.rs new file mode 100644 index 000000000..66b244dfc --- /dev/null +++ b/crates/madara/client/settlement_client/src/eth/event.rs @@ -0,0 +1,214 @@ +use crate::eth::StarknetCoreContract::LogMessageToL2; +use crate::messaging::CommonMessagingEventData; +use alloy::contract::EventPoller; +use alloy::rpc::types::Log; +use alloy::transports::http::{Client, Http}; +use anyhow::Error; +use futures::Stream; +use starknet_types_core::felt::Felt; +use std::pin::Pin; +use std::task::{Context, Poll}; + +type StreamItem = Result<(LogMessageToL2, Log), alloy::sol_types::Error>; +type StreamType = Pin + Send + 'static>>; + +pub struct EthereumEventStream { + pub stream: StreamType, +} + +impl EthereumEventStream { + pub fn new(watcher: EventPoller, LogMessageToL2>) -> Self { + let stream = watcher.into_stream(); + Self { stream: Box::pin(stream) } + } +} + +impl Stream for EthereumEventStream { + type Item = Option>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.stream.as_mut().poll_next(cx) { + Poll::Ready(Some(result)) => match result { + Ok((event, log)) => { + let event_data = (|| -> anyhow::Result { + Ok(CommonMessagingEventData { + from: Felt::from_bytes_be_slice(event.fromAddress.as_slice()), + to: Felt::from_bytes_be_slice(event.toAddress.to_be_bytes_vec().as_slice()), 
+ selector: Felt::from_bytes_be_slice(event.selector.to_be_bytes_vec().as_slice()), + nonce: Felt::from_bytes_be_slice(event.nonce.to_be_bytes_vec().as_slice()), + payload: { + let mut payload_vec = vec![]; + event.payload.iter().for_each(|ele| { + payload_vec.push(Felt::from_bytes_be_slice(ele.to_be_bytes_vec().as_slice())) + }); + payload_vec + }, + fee: Some( + event.fee.try_into().map_err(|e| anyhow::anyhow!("Felt conversion error: {}", e))?, + ), + transaction_hash: Felt::from_bytes_be_slice( + log.transaction_hash + .ok_or_else(|| anyhow::anyhow!("Missing transaction hash"))? + .to_vec() + .as_slice(), + ), + message_hash: None, + block_number: log.block_number.ok_or_else(|| anyhow::anyhow!("Missing block number"))?, + event_index: Some(log.log_index.ok_or_else(|| anyhow::anyhow!("Missing log index"))?), + }) + })(); + + Poll::Ready(Some(Some(event_data))) + } + Err(e) => Poll::Ready(Some(Some(Err(Error::from(e))))), + }, + Poll::Ready(None) => Poll::Ready(Some(None)), + Poll::Pending => Poll::Pending, + } + } +} + +#[cfg(test)] +pub mod eth_event_stream_tests { + use super::*; + use alloy::primitives::{Address, LogData, B256, U256}; + use futures::stream::iter; + use futures::StreamExt; + use rstest::rstest; + use std::str::FromStr; + + // Helper function to create mock event + pub fn create_mock_event() -> LogMessageToL2 { + LogMessageToL2 { + fromAddress: Address::from_str("0x1234567890123456789012345678901234567890").unwrap(), + toAddress: U256::from(1u64), + selector: U256::from(2u64), + fee: U256::from(1000u64), + nonce: U256::from(1u64), + payload: vec![U256::from(1u64), U256::from(2u64)], + } + } + + // Helper function to create mock log + pub fn create_mock_log() -> Log { + Log { + inner: alloy::primitives::Log { + address: Address::from_str("0x1234567890123456789012345678901234567890").unwrap(), + data: LogData::default(), + }, + block_hash: Some( + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000002").unwrap(), + ), + 
block_number: Some(100), + block_timestamp: Some(1643234567), + transaction_hash: Some( + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000003").unwrap(), + ), + transaction_index: Some(0), + log_index: Some(0), + removed: false, + } + } + + #[tokio::test] + #[rstest] + async fn test_successful_event_stream() { + // Create a sequence of mock events + let mock_events = + vec![Ok((create_mock_event(), create_mock_log())), Ok((create_mock_event(), create_mock_log()))]; + + // Create a mock stream from the events + let mock_stream = iter(mock_events); + + // Create EthereumEventStream with mock stream + let mut ethereum_stream = EthereumEventStream { stream: Box::pin(mock_stream) }; + + let mut events = Vec::new(); + + while let Some(Some(event)) = ethereum_stream.next().await { + events.push(event); + } + + assert_eq!(events.len(), 2); + + // Verify first event + match &events[0] { + Ok(event_data) => { + assert_eq!(event_data.block_number, 100); + assert_eq!(event_data.event_index, Some(0u64)); + } + _ => panic!("Expected successful event"), + } + } + + #[tokio::test] + #[rstest] + async fn test_error_handling() { + // Create a stream with an error + let mock_events = vec![Err(alloy::sol_types::Error::InvalidLog { name: "", log: Box::default() })]; + + let mock_stream = iter(mock_events); + + let mut ethereum_stream = EthereumEventStream { stream: Box::pin(mock_stream) }; + + let event = ethereum_stream.next().await.unwrap(); + + match event { + Some(Err(_)) => { /* Test passed */ } + _ => panic!("Expected error event"), + } + } + + #[tokio::test] + #[rstest] + async fn test_empty_stream() { + // Create an empty stream + let mock_events: Vec> = vec![]; + let mock_stream = iter(mock_events); + + let mut ethereum_stream = EthereumEventStream { stream: Box::pin(mock_stream) }; + + let event = ethereum_stream.next().await; + + assert!(event.unwrap().is_none(), "Expected None for empty stream"); + } + + #[tokio::test] + #[rstest] + async fn 
test_mixed_events() { + // Create a stream with mixed success and error events + let mock_events = vec![ + Ok((create_mock_event(), create_mock_log())), + Err(alloy::sol_types::Error::InvalidLog { name: "", log: Box::default() }), + Ok((create_mock_event(), create_mock_log())), + ]; + + let mock_stream = iter(mock_events); + + let mut ethereum_stream = EthereumEventStream { stream: Box::pin(mock_stream) }; + + let mut events = Vec::new(); + + while let Some(Some(event)) = ethereum_stream.next().await { + events.push(event); + } + + assert_eq!(events.len(), 3); + + // Verify event sequence + match &events[0] { + Ok(_) => {} + _ => panic!("First event should be successful"), + } + + match &events[1] { + Err(_) => {} + _ => panic!("Second event should be an error"), + } + + match &events[2] { + Ok(_) => {} + _ => panic!("Third event should be successful"), + } + } +} diff --git a/crates/madara/client/settlement_client/src/eth/mod.rs b/crates/madara/client/settlement_client/src/eth/mod.rs new file mode 100644 index 000000000..3638c0e8b --- /dev/null +++ b/crates/madara/client/settlement_client/src/eth/mod.rs @@ -0,0 +1,860 @@ +pub mod event; + +use crate::client::{ClientTrait, ClientType}; +use crate::eth::event::EthereumEventStream; +use crate::eth::StarknetCoreContract::{LogMessageToL2, StarknetCoreContractInstance}; +use crate::gas_price::L1BlockMetrics; +use crate::messaging::CommonMessagingEventData; +use crate::state_update::{update_l1, StateUpdate}; +use crate::utils::{convert_log_state_update, felt_to_u256, u256_to_felt}; +use alloy::eips::BlockNumberOrTag; +use alloy::primitives::{keccak256, Address, B256, U256}; +use alloy::providers::{Provider, ProviderBuilder, ReqwestProvider, RootProvider}; +use alloy::rpc::types::Filter; +use alloy::sol; +use alloy::sol_types::SolValue; +use alloy::transports::http::{Client, Http}; +use anyhow::{bail, Context}; +use async_trait::async_trait; +use bitvec::macros::internal::funty::Fundamental; +use futures::StreamExt; +use 
mc_db::l1_db::LastSyncedEventBlock; +use mc_db::MadaraBackend; +use mp_utils::service::ServiceContext; +use starknet_types_core::felt::Felt; +use std::sync::Arc; +use url::Url; + +// abi taken from: https://etherscan.io/address/0x6e0acfdc3cf17a7f99ed34be56c3dfb93f464e24#code +// The official starknet core contract ^ +sol!( + #[sol(rpc)] + #[derive(Debug)] + StarknetCoreContract, + "src/eth/starknet_core.json" +); + +const ERR_ARCHIVE: &str = + "Failed to watch event filter - Ensure you are using an L1 RPC endpoint that points to an archive node"; + +pub struct EthereumClient { + pub provider: Arc, + pub l1_core_contract: StarknetCoreContractInstance, RootProvider>>, +} + +#[derive(Clone)] +pub struct EthereumClientConfig { + pub url: Url, + pub l1_core_address: Address, +} + +impl Clone for EthereumClient { + fn clone(&self) -> Self { + EthereumClient { provider: Arc::clone(&self.provider), l1_core_contract: self.l1_core_contract.clone() } + } +} + +#[async_trait] +impl ClientTrait for EthereumClient { + type Config = EthereumClientConfig; + + fn get_client_type(&self) -> ClientType { + ClientType::ETH + } + + /// Create a new EthereumClient instance with the given RPC URL + async fn new(config: EthereumClientConfig) -> anyhow::Result { + let provider = ProviderBuilder::new().on_http(config.url); + // Checking if core contract exists on l1 + let l1_core_contract_bytecode = provider.get_code_at(config.l1_core_address).await?; + if l1_core_contract_bytecode.is_empty() { + bail!("The L1 Core Contract could not be found. 
Check that the L2 chain matches the L1 RPC endpoint."); + } + let core_contract = StarknetCoreContract::new(config.l1_core_address, provider.clone()); + Ok(Self { provider: Arc::new(provider), l1_core_contract: core_contract }) + } + + /// Retrieves the latest Ethereum block number + async fn get_latest_block_number(&self) -> anyhow::Result { + let block_number = self.provider.get_block_number().await?.as_u64(); + Ok(block_number) + } + + /// Get the block number of the last occurrence of a given event. + async fn get_last_event_block_number(&self) -> anyhow::Result { + let latest_block: u64 = self.get_latest_block_number().await?; + + // Assuming an avg Block time of 15sec we check for a LogStateUpdate occurence in the last ~24h + let filter = Filter::new() + .from_block(latest_block - 6000) + .to_block(latest_block) + .address(*self.l1_core_contract.address()); + + let logs = self.provider.get_logs(&filter).await?; + + let filtered_logs = logs + .into_iter() + .filter_map(|log| log.log_decode::().ok()) + .collect::>(); + + if let Some(last_log) = filtered_logs.last() { + let last_block: u64 = last_log.block_number.context("no block number in log")?; + Ok(last_block) + } else { + bail!("no event found") + } + } + + /// Get the last Starknet block number verified on L1 + async fn get_last_verified_block_number(&self) -> anyhow::Result { + let block_number = self.l1_core_contract.stateBlockNumber().call().await?; + let last_block_number: u64 = (block_number._0).as_u64(); + Ok(last_block_number) + } + + /// Get the last Starknet state root verified on L1 + async fn get_last_verified_state_root(&self) -> anyhow::Result { + let state_root = self.l1_core_contract.stateRoot().call().await?; + u256_to_felt(state_root._0) + } + + /// Get the last Starknet block hash verified on L1 + async fn get_last_verified_block_hash(&self) -> anyhow::Result { + let block_hash = self.l1_core_contract.stateBlockHash().call().await?; + u256_to_felt(block_hash._0) + } + + async fn 
get_initial_state(&self) -> anyhow::Result { + let block_number = self.get_last_verified_block_number().await?; + let block_hash = self.get_last_verified_block_hash().await?; + let global_root = self.get_last_verified_state_root().await?; + + Ok(StateUpdate { global_root, block_number, block_hash }) + } + + async fn listen_for_update_state_events( + &self, + backend: Arc, + mut ctx: ServiceContext, + l1_block_metrics: Arc, + ) -> anyhow::Result<()> { + // Listen to LogStateUpdate (0x77552641) update and send changes continuously + let event_filter = self.l1_core_contract.event_filter::(); + + let mut event_stream = match ctx.run_until_cancelled(event_filter.watch()).await { + Some(res) => res.context(ERR_ARCHIVE)?.into_stream(), + None => return anyhow::Ok(()), + }; + + while let Some(Some(event_result)) = ctx.run_until_cancelled(event_stream.next()).await { + let log = event_result.context("listening for events")?; + let format_event: StateUpdate = + convert_log_state_update(log.0.clone()).context("formatting event into an L1StateUpdate")?; + update_l1(&backend, format_event, l1_block_metrics.clone())?; + } + + Ok(()) + } + + async fn get_gas_prices(&self) -> anyhow::Result<(u128, u128)> { + let block_number = self.get_latest_block_number().await?; + let fee_history = self.provider.get_fee_history(300, BlockNumberOrTag::Number(block_number), &[]).await?; + + // The RPC responds with 301 elements for some reason. It's also just safer to manually + // take the last 300. We choose 300 to get average gas caprice for last one hour (300 * 12 sec block + // time). 
+ let (_, blob_fee_history_one_hour) = + fee_history.base_fee_per_blob_gas.split_at(fee_history.base_fee_per_blob_gas.len().max(300) - 300); + + let avg_blob_base_fee = if !blob_fee_history_one_hour.is_empty() { + blob_fee_history_one_hour.iter().sum::() / blob_fee_history_one_hour.len() as u128 + } else { + 0 // in case blob_fee_history_one_hour has 0 length + }; + + let eth_gas_price = fee_history.base_fee_per_gas.last().context("Getting eth gas price")?; + Ok((*eth_gas_price, avg_blob_base_fee)) + } + + fn get_messaging_hash(&self, event: &CommonMessagingEventData) -> anyhow::Result> { + let mut payload_vec = Vec::new(); + for ele in event.payload.clone() { + payload_vec.push(felt_to_u256(ele)); + } + + let from_address_start_index = event.from.to_bytes_be().as_slice().len().saturating_sub(20); + let data = ( + [0u8; 12], + Address::from_slice(&event.from.to_bytes_be().as_slice()[from_address_start_index..]), + felt_to_u256(event.to), + felt_to_u256(event.nonce), + felt_to_u256(event.selector), + U256::from(event.payload.len()), + payload_vec, + ); + Ok(keccak256(data.abi_encode_packed()).as_slice().to_vec()) + } + + /// Get cancellation status of an L1 to L2 message + /// + /// This function query the core contract to know if a L1->L2 message has been cancelled + /// # Arguments + /// + /// - msg_hash : Hash of L1 to L2 message + /// + /// # Return + /// + /// - A felt representing a timestamp : + /// - 0 if the message has not been cancelled + /// - timestamp of the cancellation if it has been cancelled + /// - An Error if the call fail + async fn get_l1_to_l2_message_cancellations(&self, msg_hash: Vec) -> anyhow::Result { + //l1ToL2MessageCancellations + let cancellation_timestamp = + self.l1_core_contract.l1ToL2MessageCancellations(B256::from_slice(msg_hash.as_slice())).call().await?; + u256_to_felt(cancellation_timestamp._0) + } + + type StreamType = EthereumEventStream; + async fn get_messaging_stream( + &self, + last_synced_event_block: 
LastSyncedEventBlock, + ) -> anyhow::Result { + let filter = self.l1_core_contract.event_filter::(); + let event_stream = filter + .from_block(last_synced_event_block.block_number) + .to_block(BlockNumberOrTag::Finalized) + .watch() + .await?; + Ok(EthereumEventStream::new(event_stream)) + } +} + +#[cfg(test)] +pub mod eth_client_getter_test { + use super::*; + use alloy::{ + node_bindings::{Anvil, AnvilInstance}, + primitives::U256, + }; + + use serial_test::serial; + use std::ops::Range; + use std::sync::Mutex; + use tokio; + + // https://etherscan.io/tx/0xcadb202495cd8adba0d9b382caff907abf755cd42633d23c4988f875f2995d81#eventlog + // The txn we are referring to it is here ^ + const L1_BLOCK_NUMBER: u64 = 20395662; + const CORE_CONTRACT_ADDRESS: &str = "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4"; + const L2_BLOCK_NUMBER: u64 = 662703; + const L2_BLOCK_HASH: &str = "563216050958639290223177746678863910249919294431961492885921903486585884664"; + const L2_STATE_ROOT: &str = "1456190284387746219409791261254265303744585499659352223397867295223408682130"; + + lazy_static::lazy_static! { + static ref FORK_URL: String = std::env::var("ETH_FORK_URL").expect("ETH_FORK_URL not set"); + } + + const PORT_RANGE: Range = 19500..20000; + + struct AvailablePorts> { + to_reuse: Vec, + next: I, + } + + lazy_static::lazy_static! 
{ + static ref AVAILABLE_PORTS: Mutex>> = Mutex::new(AvailablePorts { to_reuse: vec![], next: PORT_RANGE }); + } + pub struct AnvilPortNum(pub u16); + impl Drop for AnvilPortNum { + fn drop(&mut self) { + let mut guard = AVAILABLE_PORTS.lock().expect("poisoned lock"); + guard.to_reuse.push(self.0); + } + } + + pub fn get_port() -> AnvilPortNum { + let mut guard = AVAILABLE_PORTS.lock().expect("poisoned lock"); + if let Some(el) = guard.to_reuse.pop() { + return AnvilPortNum(el); + } + AnvilPortNum(guard.next.next().expect("no more port to use")) + } + + static ANVIL: Mutex>> = Mutex::new(None); + + pub fn get_shared_anvil() -> Arc { + let mut anvil = ANVIL.lock().expect("poisoned lock"); + if anvil.is_none() { + *anvil = Some(Arc::new(create_anvil_instance())); + } + Arc::clone(anvil.as_ref().unwrap()) + } + + pub fn create_anvil_instance() -> AnvilInstance { + let port = get_port(); + let anvil = Anvil::new() + .fork(FORK_URL.clone()) + .fork_block_number(L1_BLOCK_NUMBER) + .port(port.0) + .timeout(60_000) + .try_spawn() + .expect("failed to spawn anvil instance"); + println!("Anvil started and running at `{}`", anvil.endpoint()); + anvil + } + + pub fn create_ethereum_client(url: Option<&str>) -> EthereumClient { + let rpc_url: Url = url.unwrap_or("http://localhost:8545").parse().expect("issue while parsing URL"); + + let provider = ProviderBuilder::new().on_http(rpc_url.clone()); + let address = Address::parse_checksummed(CORE_CONTRACT_ADDRESS, None).unwrap(); + let contract = StarknetCoreContract::new(address, provider.clone()); + + EthereumClient { provider: Arc::new(provider), l1_core_contract: contract.clone() } + } + + #[serial] + #[tokio::test] + async fn fail_create_new_client_invalid_core_contract() { + let anvil = get_shared_anvil(); + // Sepolia core contract instead of mainnet + const INVALID_CORE_CONTRACT_ADDRESS: &str = "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057"; + + let rpc_url: Url = anvil.endpoint_url(); + + let core_contract_address = 
Address::parse_checksummed(INVALID_CORE_CONTRACT_ADDRESS, None).unwrap(); + + let ethereum_client_config = EthereumClientConfig { url: rpc_url, l1_core_address: core_contract_address }; + + let new_client_result = EthereumClient::new(ethereum_client_config).await; + assert!(new_client_result.is_err(), "EthereumClient::new should fail with an invalid core contract address"); + } + + #[serial] + #[tokio::test] + async fn get_latest_block_number_works() { + let anvil = get_shared_anvil(); + let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); + let block_number = + eth_client.provider.get_block_number().await.expect("issue while fetching the block number").as_u64(); + assert_eq!(block_number, L1_BLOCK_NUMBER, "provider unable to get the correct block number"); + } + + #[serial] + #[tokio::test] + async fn get_last_event_block_number_works() { + let anvil = get_shared_anvil(); + let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); + let block_number = eth_client + .get_last_event_block_number() + .await + .expect("issue while getting the last block number with given event"); + assert_eq!(block_number, L1_BLOCK_NUMBER, "block number with given event not matching"); + } + + #[serial] + #[tokio::test] + async fn get_last_verified_block_hash_works() { + let anvil = get_shared_anvil(); + let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); + let block_hash = + eth_client.get_last_verified_block_hash().await.expect("issue while getting the last verified block hash"); + let expected = u256_to_felt(U256::from_str_radix(L2_BLOCK_HASH, 10).unwrap()).unwrap(); + assert_eq!(block_hash, expected, "latest block hash not matching"); + } + + #[serial] + #[tokio::test] + async fn get_last_state_root_works() { + let anvil = get_shared_anvil(); + let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); + let state_root = eth_client.get_last_verified_state_root().await.expect("issue while getting the state 
root"); + let expected = u256_to_felt(U256::from_str_radix(L2_STATE_ROOT, 10).unwrap()).unwrap(); + assert_eq!(state_root, expected, "latest block state root not matching"); + } + + #[serial] + #[tokio::test] + async fn get_last_verified_block_number_works() { + let anvil = get_shared_anvil(); + let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); + let block_number = eth_client.get_last_verified_block_number().await.expect("issue"); + assert_eq!(block_number, L2_BLOCK_NUMBER, "verified block number not matching"); + } +} + +#[cfg(test)] +mod l1_messaging_tests { + + use std::{sync::Arc, time::Duration}; + + use self::DummyContract::DummyContractInstance; + use crate::client::ClientTrait; + use crate::eth::{EthereumClient, StarknetCoreContract}; + use crate::messaging::{sync, CommonMessagingEventData}; + use alloy::{ + hex::FromHex, + node_bindings::{Anvil, AnvilInstance}, + primitives::{Address, U256}, + providers::{ProviderBuilder, RootProvider}, + sol, + transports::http::{Client, Http}, + }; + use mc_db::DatabaseService; + use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; + use mp_chain_config::ChainConfig; + use mp_utils::service::ServiceContext; + use rstest::*; + use starknet_api::core::Nonce; + use starknet_types_core::felt::Felt; + use tempfile::TempDir; + use tracing_test::traced_test; + use url::Url; + + struct TestRunner { + #[allow(dead_code)] + anvil: AnvilInstance, // Not used but needs to stay in scope otherwise it will be dropped + chain_config: Arc, + db_service: Arc, + dummy_contract: DummyContractInstance, RootProvider>>, + eth_client: EthereumClient, + mempool: Arc, + } + + // LogMessageToL2 from https://etherscan.io/tx/0x21980d6674d33e50deee43c6c30ef3b439bd148249b4539ce37b7856ac46b843 + // bytecode is compiled DummyContractBasicTestCase + sol!( + #[derive(Debug)] + #[sol(rpc, 
bytecode="6080604052348015600e575f80fd5b506108258061001c5f395ff3fe608060405234801561000f575f80fd5b506004361061004a575f3560e01c80634185df151461004e57806390985ef9146100585780639be446bf14610076578063af56443a146100a6575b5f80fd5b6100566100c2565b005b61006061013b565b60405161006d9190610488565b60405180910390f35b610090600480360381019061008b91906104cf565b6101ac565b60405161009d9190610512565b60405180910390f35b6100c060048036038101906100bb9190610560565b6101d8565b005b5f6100cb6101f3565b905080604001518160200151825f015173ffffffffffffffffffffffffffffffffffffffff167fdb80dd488acf86d17c747445b0eabb5d57c541d3bd7b6b87af987858e5066b2b846060015185608001518660a0015160405161013093929190610642565b60405180910390a450565b5f806101456101f3565b9050805f015173ffffffffffffffffffffffffffffffffffffffff1681602001518260800151836040015184606001515185606001516040516020016101909695949392919061072a565b6040516020818303038152906040528051906020012091505090565b5f805f9054906101000a900460ff166101c5575f6101cb565b6366b4f1055b63ffffffff169050919050565b805f806101000a81548160ff02191690831515021790555050565b6101fb610429565b5f73ae0ee0a63a2ce6baeeffe56e7714fb4efe48d41990505f7f073314940630fd6dcda0d772d4c972c4e0a9946bef9dabf4ef84eda8ef542b8290505f7f01b64b1b3b690b43b9b514fb81377518f4039cd3e4f4914d8a6bdf01d679fb1990505f600767ffffffffffffffff81111561027757610276610795565b5b6040519080825280602002602001820160405280156102a55781602001602082028036833780820191505090505b5090506060815f815181106102bd576102bc6107c2565b5b60200260200101818152505062195091816001815181106102e1576102e06107c2565b5b60200260200101818152505065231594f0c7ea81600281518110610308576103076107c2565b5b60200260200101818152505060058160038151811061032a576103296107c2565b5b602002602001018181525050624554488160048151811061034e5761034d6107c2565b5b60200260200101818152505073bdb193c166cfb7be2e51711c5648ebeef94063bb81600581518110610383576103826107c2565b5b6020026020010181815250507e7d79cd86ba27a2508a9ca55c8b3474ca082bc5173d0467824f07a32e9db888816006815181106103c3576103c26107c2565b5b602002
6020010181815250505f662386f26fc1000090505f6040518060c001604052808773ffffffffffffffffffffffffffffffffffffffff16815260200186815260200185815260200184815260200183815260200182815250965050505050505090565b6040518060c001604052805f73ffffffffffffffffffffffffffffffffffffffff1681526020015f81526020015f8152602001606081526020015f81526020015f81525090565b5f819050919050565b61048281610470565b82525050565b5f60208201905061049b5f830184610479565b92915050565b5f80fd5b6104ae81610470565b81146104b8575f80fd5b50565b5f813590506104c9816104a5565b92915050565b5f602082840312156104e4576104e36104a1565b5b5f6104f1848285016104bb565b91505092915050565b5f819050919050565b61050c816104fa565b82525050565b5f6020820190506105255f830184610503565b92915050565b5f8115159050919050565b61053f8161052b565b8114610549575f80fd5b50565b5f8135905061055a81610536565b92915050565b5f60208284031215610575576105746104a1565b5b5f6105828482850161054c565b91505092915050565b5f81519050919050565b5f82825260208201905092915050565b5f819050602082019050919050565b6105bd816104fa565b82525050565b5f6105ce83836105b4565b60208301905092915050565b5f602082019050919050565b5f6105f08261058b565b6105fa8185610595565b9350610605836105a5565b805f5b8381101561063557815161061c88826105c3565b9750610627836105da565b925050600181019050610608565b5085935050505092915050565b5f6060820190508181035f83015261065a81866105e6565b90506106696020830185610503565b6106766040830184610503565b949350505050565b5f819050919050565b610698610693826104fa565b61067e565b82525050565b5f81905092915050565b6106b1816104fa565b82525050565b5f6106c283836106a8565b60208301905092915050565b5f6106d88261058b565b6106e2818561069e565b93506106ed836105a5565b805f5b8381101561071d57815161070488826106b7565b975061070f836105da565b9250506001810190506106f0565b5085935050505092915050565b5f6107358289610687565b6020820191506107458288610687565b6020820191506107558287610687565b6020820191506107658286610687565b6020820191506107758285610687565b60208201915061078582846106ce565b9150819050979650505050505050565b7f4e487b710000000000000000000000000000000000000000
00000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffdfea2646970667358221220ddc41ccc2cc8b33e1f608fb6cabf9ead1150daa8798e94e03ce9cd61e0d9389164736f6c634300081a0033")] + contract DummyContract { + bool isCanceled; + event LogMessageToL2(address indexed _fromAddress, uint256 indexed _toAddress, uint256 indexed _selector, uint256[] payload, uint256 nonce, uint256 fee); + + struct MessageData { + address fromAddress; + uint256 toAddress; + uint256 selector; + uint256[] payload; + uint256 nonce; + uint256 fee; + } + + function getMessageData() internal pure returns (MessageData memory) { + address fromAddress = address(993696174272377493693496825928908586134624850969); + uint256 toAddress = 3256441166037631918262930812410838598500200462657642943867372734773841898370; + uint256 selector = 774397379524139446221206168840917193112228400237242521560346153613428128537; + uint256[] memory payload = new uint256[](7); + payload[0] = 96; + payload[1] = 1659025; + payload[2] = 38575600093162; + payload[3] = 5; + payload[4] = 4543560; + payload[5] = 1082959358903034162641917759097118582889062097851; + payload[6] = 221696535382753200248526706088340988821219073423817576256483558730535647368; + uint256 nonce = 10000000000000000; + uint256 fee = 0; + + return MessageData(fromAddress, toAddress, selector, payload, nonce, fee); + } + + function fireEvent() public { + MessageData memory data = getMessageData(); + emit LogMessageToL2(data.fromAddress, data.toAddress, data.selector, data.payload, data.nonce, data.fee); + } + + function l1ToL2MessageCancellations(bytes32 msgHash) external view returns (uint256) { + return isCanceled ? 
1723134213 : 0; + } + + function setIsCanceled(bool value) public { + isCanceled = value; + } + + function getL1ToL2MsgHash() external pure returns (bytes32) { + MessageData memory data = getMessageData(); + return keccak256( + abi.encodePacked( + uint256(uint160(data.fromAddress)), + data.toAddress, + data.nonce, + data.selector, + data.payload.length, + data.payload + ) + ); + } + } + ); + + /// Common setup for tests + /// + /// This test performs the following steps: + /// 1. Sets up test environemment + /// 2. Starts worker + /// 3. Fires a Message event from the dummy contract + /// 4. Waits for event to be processed + /// 5. Assert that the worker handle the event with correct data + /// 6. Assert that the hash computed by the worker is correct + /// 7. TODO : Assert that the tx is succesfully submited to the mempool + /// 8. Assert that the event is successfully pushed to the db + /// 9. TODO : Assert that the tx was correctly executed + #[fixture] + async fn setup_test_env() -> TestRunner { + // Start Anvil instance + let anvil = Anvil::new().block_time(1).chain_id(1337).try_spawn().expect("failed to spawn anvil instance"); + println!("Anvil started and running at `{}`", anvil.endpoint()); + + // Set up chain info + let chain_config = Arc::new(ChainConfig::madara_test()); + + // Set up database paths + let temp_dir = TempDir::new().expect("issue while creating temporary directory"); + let base_path = temp_dir.path().join("data"); + let backup_dir = Some(temp_dir.path().join("backups")); + + // Initialize database service + let db = Arc::new( + DatabaseService::new(&base_path, backup_dir, false, chain_config.clone(), Default::default()) + .await + .expect("Failed to create database service"), + ); + + let l1_gas_setter = GasPriceProvider::new(); + let l1_data_provider: Arc = Arc::new(l1_gas_setter.clone()); + + let mempool = Arc::new(Mempool::new( + Arc::clone(db.backend()), + Arc::clone(&l1_data_provider), + MempoolLimits::for_testing(), + )); + + // Set 
up provider + let rpc_url: Url = anvil.endpoint().parse().expect("issue while parsing"); + let provider = ProviderBuilder::new().on_http(rpc_url); + + // Set up dummy contract + let contract = DummyContract::deploy(provider.clone()).await.unwrap(); + + let core_contract = StarknetCoreContract::new(*contract.address(), provider.clone()); + + let eth_client = + EthereumClient { provider: Arc::new(provider.clone()), l1_core_contract: core_contract.clone() }; + + TestRunner { anvil, chain_config, db_service: db, dummy_contract: contract, eth_client, mempool } + } + + /// Test the basic workflow of l1 -> l2 messaging + /// + /// This test performs the following steps: + /// 1. Sets up test environemment + /// 2. Starts worker + /// 3. Fires a Message event from the dummy contract + /// 4. Waits for event to be processed + /// 5. Assert that the worker handle the event with correct data + /// 6. Assert that the hash computed by the worker is correct + /// 7. TODO : Assert that the tx is succesfully submited to the mempool + /// 8. Assert that the event is successfully pushed to the db + /// 9. 
TODO : Assert that the tx was correctly executed + #[rstest] + #[traced_test] + #[tokio::test] + async fn e2e_test_basic_workflow(#[future] setup_test_env: TestRunner) { + let TestRunner { chain_config, db_service: db, dummy_contract: contract, eth_client, anvil: _anvil, mempool } = + setup_test_env.await; + + // Start worker + let worker_handle = { + let db = Arc::clone(&db); + tokio::spawn(async move { + sync( + Arc::new(Box::new(eth_client)), + Arc::clone(db.backend()), + chain_config.chain_id.clone(), + mempool, + ServiceContext::new_for_testing(), + ) + .await + }) + }; + + let _ = contract.setIsCanceled(false).send().await; + // Send a Event and wait for processing, Panic if fail + let _ = contract.fireEvent().send().await.expect("Failed to fire event"); + tokio::time::sleep(Duration::from_secs(5)).await; + + // Assert that event was caught by the worker with correct data + // TODO: Maybe add some more assert + assert!(logs_contain("fromAddress: \"0xae0ee0a63a2ce6baeeffe56e7714fb4efe48d419\"")); + + // Assert the tx hash computed by the worker is correct + let event_hash = contract.getL1ToL2MsgHash().call().await.expect("failed to get hash")._0.to_string(); + assert!(logs_contain(&format!("event hash: {:?}", event_hash))); + + // TODO : Assert that the tx has been included in the mempool + + // Assert that the event is well stored in db + let last_block = + db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); + assert_ne!(last_block.block_number, 0); + let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); + assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); + // TODO : Assert that the tx was correctly executed + + // Explicitly cancel the listen task, else it would be running in the background + worker_handle.abort(); + } + + /// Test the workflow of l1 -> l2 messaging with duplicate event + /// + /// This test performs the following steps: + /// 1. 
Sets up test environment + /// 2. Starts worker + /// 3. Fires a Message event from the dummy contract + /// 4. Waits for event to be processed + /// 5. Assert that the event is well stored in db + /// 6. Fires a Message with the same event from the dummy contract + /// 7. Assert that the last event stored is the first one + #[rstest] + #[traced_test] + #[tokio::test] + async fn e2e_test_already_processed_event(#[future] setup_test_env: TestRunner) { + let TestRunner { chain_config, db_service: db, dummy_contract: contract, eth_client, anvil: _anvil, mempool } = + setup_test_env.await; + + // Start worker + let worker_handle = { + let db = Arc::clone(&db); + tokio::spawn(async move { + sync( + Arc::new(Box::new(eth_client)), + Arc::clone(db.backend()), + chain_config.chain_id.clone(), + mempool, + ServiceContext::new_for_testing(), + ) + .await + }) + }; + + let _ = contract.setIsCanceled(false).send().await; + let _ = contract.fireEvent().send().await.expect("Failed to fire event"); + tokio::time::sleep(Duration::from_secs(5)).await; + let last_block = + db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); + assert_ne!(last_block.block_number, 0); + let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); + assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); + + // Send the event a second time + let _ = contract.fireEvent().send().await.expect("Failed to fire event"); + tokio::time::sleep(Duration::from_secs(5)).await; + // Assert that the last event in db is still the same as it is already processed (same nonce) + assert_eq!( + last_block.block_number, + db.backend() + .messaging_last_synced_l1_block_with_event() + .expect("failed to retrieve block") + .unwrap() + .block_number + ); + assert!(logs_contain("Event already processed")); + + worker_handle.abort(); + } + + /// Test the workflow of l1 -> l2 messaging with message cancelled + /// + /// This test
performs the following steps: + /// 1. Sets up test environment + /// 2. Starts worker + /// 3. Fires a Message event from the dummy contract + /// 4. Waits for event to be processed + /// 5. Assert that the event is not stored in db + #[rstest] + #[traced_test] + #[tokio::test] + async fn e2e_test_message_canceled(#[future] setup_test_env: TestRunner) { + let TestRunner { chain_config, db_service: db, dummy_contract: contract, eth_client, anvil: _anvil, mempool } = + setup_test_env.await; + + // Start worker + let worker_handle = { + let db = Arc::clone(&db); + tokio::spawn(async move { + sync( + Arc::new(Box::new(eth_client)), + Arc::clone(db.backend()), + chain_config.chain_id.clone(), + mempool, + ServiceContext::new_for_testing(), + ) + .await + }) + }; + + // Mock cancelled message + let _ = contract.setIsCanceled(true).send().await; + let _ = contract.fireEvent().send().await.expect("Failed to fire event"); + tokio::time::sleep(Duration::from_secs(5)).await; + let last_block = + db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); + assert_eq!(last_block.block_number, 0); + let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); + // cancelled message nonce should be inserted to avoid reprocessing + assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); + assert!(logs_contain("Message was cancelled in block at timestamp: 0x66b4f105")); + + worker_handle.abort(); + } + + /// Test taken from starknet.rs to ensure consistency + /// https://github.com/xJonathanLEI/starknet-rs/blob/2ddc69479d326ed154df438d22f2d720fbba746e/starknet-core/src/types/msg.rs#L96 + #[rstest] + #[tokio::test] + async fn test_msg_to_l2_hash() { + let TestRunner { + chain_config: _chain_config, + db_service: _db, + dummy_contract: _contract, + eth_client, + anvil: _anvil, + mempool: _mempool, + } = setup_test_env().await; + + let msg = eth_client +
.get_messaging_hash(&CommonMessagingEventData { + from: Felt::from_bytes_be_slice( + Address::from_hex("c3511006C04EF1d78af4C8E0e74Ec18A6E64Ff9e").unwrap().0 .0.to_vec().as_slice(), + ), + to: Felt::from_hex("0x73314940630fd6dcda0d772d4c972c4e0a9946bef9dabf4ef84eda8ef542b82").unwrap(), + selector: Felt::from_hex("0x2d757788a8d8d6f21d1cd40bce38a8222d70654214e96ff95d8086e684fbee5").unwrap(), + payload: vec![ + Felt::from_hex("0x689ead7d814e51ed93644bc145f0754839b8dcb340027ce0c30953f38f55d7").unwrap(), + Felt::from_hex("0x2c68af0bb140000").unwrap(), + Felt::from_hex("0x0").unwrap(), + ], + nonce: Felt::from_bytes_be_slice(U256::from(775628).to_be_bytes_vec().as_slice()), + fee: Some(u128::try_from(Felt::from_bytes_be_slice(U256::ZERO.to_be_bytes_vec().as_slice())).unwrap()), + transaction_hash: Felt::ZERO, + message_hash: None, + block_number: 0, + event_index: None, + }) + .expect("Failed to compute l1 to l2 msg hash"); + + let expected_hash = + <[u8; 32]>::from_hex("c51a543ef9563ad2545342b390b67edfcddf9886aa36846cf70382362fc5fab3").unwrap(); + + assert_eq!(msg, expected_hash); + } +} + +#[cfg(test)] +mod eth_client_event_subscription_test { + use super::*; + use std::{sync::Arc, time::Duration}; + + use crate::eth::event::EthereumEventStream; + use crate::eth::{EthereumClient, EthereumClientConfig, StarknetCoreContract}; + use crate::state_update::state_update_worker; + use alloy::{node_bindings::Anvil, providers::ProviderBuilder, sol}; + use mc_db::DatabaseService; + use mp_chain_config::ChainConfig; + use rstest::*; + use tempfile::TempDir; + use url::Url; + + sol!( + #[sol(rpc, 
bytecode="6080604052348015600e575f80fd5b506101618061001c5f395ff3fe608060405234801561000f575f80fd5b5060043610610029575f3560e01c80634185df151461002d575b5f80fd5b610035610037565b005b5f7f0639349b21e886487cd6b341de2050db8ab202d9c6b0e7a2666d598e5fcf81a690505f620a1caf90505f7f0279b69383ea92624c1ae4378ac7fae6428f47bbd21047ea0290c3653064188590507fd342ddf7a308dec111745b00315c14b7efb2bdae570a6856e088ed0c65a3576c8383836040516100b9939291906100f6565b60405180910390a1505050565b5f819050919050565b6100d8816100c6565b82525050565b5f819050919050565b6100f0816100de565b82525050565b5f6060820190506101095f8301866100cf565b61011660208301856100e7565b61012360408301846100cf565b94935050505056fea2646970667358221220fbc6fd165c86ed9af0c5fcab2830d4a72894fd6a98e9c16dbf9101c4c22e2f7d64736f6c634300081a0033")] + contract DummyContract { + event LogStateUpdate(uint256 globalRoot, int256 blockNumber, uint256 blockHash); + + function fireEvent() public { + uint256 globalRoot = 2814950447364693428789615812443623689251959344851195711990387747563915674022; + int256 blockNumber = 662703; + uint256 blockHash = 1119674286844400689540394420005977072742999649767515920196535047615668295813; + + emit LogStateUpdate(globalRoot, blockNumber, blockHash); + } + } + ); + + const L2_BLOCK_NUMBER: u64 = 662703; + const ANOTHER_ANVIL_PORT: u16 = 8548; + const EVENT_PROCESSING_TIME: u64 = 2; // Time to allow for event processing in seconds + + /// Test the event subscription and state update functionality + /// + /// This test performs the following steps: + /// 1. Sets up a mock Ethereum environment using Anvil + /// 2. Initializes necessary services (Database, Metrics) + /// 3. Deploys a dummy contract and sets up an Ethereum client + /// 4. Starts listening for state updates + /// 5. Fires an event from the dummy contract + /// 6. 
Waits for event processing and verifies the block number + #[rstest] + #[tokio::test] + async fn listen_and_update_state_when_event_fired_works() { + // Start Anvil instance + let anvil = Anvil::new() + .block_time(1) + .chain_id(1337) + .port(ANOTHER_ANVIL_PORT) + .try_spawn() + .expect("failed to spawn anvil instance"); + println!("Anvil started and running at `{}`", anvil.endpoint()); + + // Set up chain info + let chain_info = Arc::new(ChainConfig::madara_test()); + + // Set up database paths + let temp_dir = TempDir::new().expect("issue while creating temporary directory"); + let base_path = temp_dir.path().join("data"); + let backup_dir = Some(temp_dir.path().join("backups")); + + // Initialize database service + let db = Arc::new( + DatabaseService::new(&base_path, backup_dir, false, chain_info.clone(), Default::default()) + .await + .expect("Failed to create database service"), + ); + + let rpc_url: Url = anvil.endpoint().parse().expect("issue while parsing"); + let provider = ProviderBuilder::new().on_http(rpc_url); + + let contract = DummyContract::deploy(provider.clone()).await.unwrap(); + let core_contract = StarknetCoreContract::new(*contract.address(), provider.clone()); + + let eth_client = EthereumClient { provider: Arc::new(provider), l1_core_contract: core_contract.clone() }; + let l1_block_metrics = L1BlockMetrics::register().unwrap(); + + // Start listening for state updates + let listen_handle = { + let db = Arc::clone(&db); + tokio::spawn(async move { + state_update_worker::( + Arc::clone(db.backend()), + Arc::new(Box::new(eth_client)), + ServiceContext::new_for_testing(), + Arc::new(l1_block_metrics), + ) + .await + .unwrap() + }) + }; + + let _ = contract.fireEvent().send().await.expect("Failed to fire event"); + + // Wait for event processing + tokio::time::sleep(Duration::from_secs(EVENT_PROCESSING_TIME)).await; + + // Verify the block number + let block_in_db = + db.backend().get_l1_last_confirmed_block().expect("Failed to get L1 last 
confirmed block number"); + + // Explicitly cancel the listen task, else it would be running in the background + listen_handle.abort(); + assert_eq!(block_in_db, Some(L2_BLOCK_NUMBER), "Block in DB does not match expected L2 block number"); + } +} diff --git a/crates/madara/client/eth/src/abis/starknet_core.json b/crates/madara/client/settlement_client/src/eth/starknet_core.json similarity index 100% rename from crates/madara/client/eth/src/abis/starknet_core.json rename to crates/madara/client/settlement_client/src/eth/starknet_core.json diff --git a/crates/madara/client/eth/src/l1_gas_price.rs b/crates/madara/client/settlement_client/src/gas_price.rs similarity index 64% rename from crates/madara/client/eth/src/l1_gas_price.rs rename to crates/madara/client/settlement_client/src/gas_price.rs index 723ccbd13..018007e98 100644 --- a/crates/madara/client/eth/src/l1_gas_price.rs +++ b/crates/madara/client/settlement_client/src/gas_price.rs @@ -1,23 +1,73 @@ -use crate::client::EthereumClient; -use alloy::eips::BlockNumberOrTag; -use alloy::providers::Provider; +use crate::client::ClientTrait; use anyhow::Context; use bigdecimal::BigDecimal; use mc_mempool::{GasPriceProvider, L1DataProvider}; -use std::{ - sync::Arc, - time::{Duration, UNIX_EPOCH}, -}; +use std::sync::Arc; +use std::time::{Duration, UNIX_EPOCH}; +use crate::messaging::CommonMessagingEventData; +use futures::Stream; +use mc_analytics::register_gauge_metric_instrument; use mp_utils::service::ServiceContext; +use opentelemetry::global::Error; +use opentelemetry::metrics::Gauge; +use opentelemetry::{global, KeyValue}; use std::time::SystemTime; -pub async fn gas_price_worker_once( - eth_client: &EthereumClient, +#[derive(Clone, Debug)] +pub struct L1BlockMetrics { + // L1 network metrics + pub l1_block_number: Gauge, + // gas price is also define in sync/metrics/block_metrics.rs but this would be the price from l1 + pub l1_gas_price_wei: Gauge, + pub l1_gas_price_strk: Gauge, +} + +impl L1BlockMetrics { + 
pub fn register() -> Result { + let common_scope_attributes = vec![KeyValue::new("crate", "L1 Block")]; + let eth_meter = global::meter_with_version( + "crates.l1block.opentelemetry", + Some("0.17"), + Some("https://opentelemetry.io/schemas/1.2.0"), + Some(common_scope_attributes.clone()), + ); + + let l1_block_number = register_gauge_metric_instrument( + ð_meter, + "l1_block_number".to_string(), + "Gauge for madara L1 block number".to_string(), + "".to_string(), + ); + + let l1_gas_price_wei = register_gauge_metric_instrument( + ð_meter, + "l1_gas_price_wei".to_string(), + "Gauge for madara L1 gas price in wei".to_string(), + "".to_string(), + ); + + let l1_gas_price_strk = register_gauge_metric_instrument( + ð_meter, + "l1_gas_price_strk".to_string(), + "Gauge for madara L1 gas price in strk".to_string(), + "".to_string(), + ); + + Ok(Self { l1_block_number, l1_gas_price_wei, l1_gas_price_strk }) + } +} + +pub async fn gas_price_worker_once( + settlement_client: Arc>>, l1_gas_provider: &GasPriceProvider, gas_price_poll_ms: Duration, -) -> anyhow::Result<()> { - match update_gas_price(eth_client, l1_gas_provider).await { + l1_block_metrics: Arc, +) -> anyhow::Result<()> +where + S: Stream>> + Send + 'static, +{ + match update_gas_price(settlement_client, l1_gas_provider, l1_block_metrics).await { Ok(_) => tracing::trace!("Updated gas prices"), Err(e) => tracing::error!("Failed to update gas prices: {:?}", e), } @@ -25,61 +75,63 @@ pub async fn gas_price_worker_once( let last_update_timestamp = l1_gas_provider.get_gas_prices_last_update(); let duration_since_last_update = SystemTime::now().duration_since(last_update_timestamp)?; - let last_update_timestemp = + let last_update_timestamp = last_update_timestamp.duration_since(UNIX_EPOCH).expect("SystemTime before UNIX EPOCH!").as_micros(); if duration_since_last_update > 10 * gas_price_poll_ms { anyhow::bail!( "Gas prices have not been updated for {} ms. 
Last update was at {}", duration_since_last_update.as_micros(), - last_update_timestemp + last_update_timestamp ); } anyhow::Ok(()) } -pub async fn gas_price_worker( - eth_client: Arc, +pub async fn gas_price_worker( + settlement_client: Arc>>, l1_gas_provider: GasPriceProvider, gas_price_poll_ms: Duration, mut ctx: ServiceContext, -) -> anyhow::Result<()> { + l1_block_metrics: Arc, +) -> anyhow::Result<()> +where + S: Stream>> + Send + 'static, +{ l1_gas_provider.update_last_update_timestamp(); let mut interval = tokio::time::interval(gas_price_poll_ms); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); while ctx.run_until_cancelled(interval.tick()).await.is_some() { - gas_price_worker_once(ð_client, &l1_gas_provider, gas_price_poll_ms).await?; + gas_price_worker_once( + Arc::clone(&settlement_client), + &l1_gas_provider, + gas_price_poll_ms, + l1_block_metrics.clone(), + ) + .await?; } anyhow::Ok(()) } -async fn update_gas_price(eth_client: &EthereumClient, l1_gas_provider: &GasPriceProvider) -> anyhow::Result<()> { - let block_number = eth_client.get_latest_block_number().await?; - let fee_history = eth_client.provider.get_fee_history(300, BlockNumberOrTag::Number(block_number), &[]).await?; - - // The RPC responds with 301 elements for some reason. It's also just safer to manually - // take the last 300. We choose 300 to get average gas caprice for last one hour (300 * 12 sec block - // time). 
- let (_, blob_fee_history_one_hour) = - fee_history.base_fee_per_blob_gas.split_at(fee_history.base_fee_per_blob_gas.len().max(300) - 300); - - let avg_blob_base_fee = if !blob_fee_history_one_hour.is_empty() { - blob_fee_history_one_hour.iter().sum::() / blob_fee_history_one_hour.len() as u128 - } else { - 0 // in case blob_fee_history_one_hour has 0 length - }; - - let eth_gas_price = fee_history.base_fee_per_gas.last().context("Getting eth gas price")?; - - l1_gas_provider.update_eth_l1_gas_price(*eth_gas_price); +async fn update_gas_price( + settlement_client: Arc>>, + l1_gas_provider: &GasPriceProvider, + l1_block_metrics: Arc, +) -> anyhow::Result<()> +where + S: Stream>> + Send + 'static, +{ + let (eth_gas_price, avg_blob_base_fee) = settlement_client.get_gas_prices().await?; + + l1_gas_provider.update_eth_l1_gas_price(eth_gas_price); l1_gas_provider.update_eth_l1_data_gas_price(avg_blob_base_fee); // fetch eth/strk price and update if let Some(oracle_provider) = &l1_gas_provider.oracle_provider { let (eth_strk_price, decimals) = oracle_provider.fetch_eth_strk_price().await.context("failed to retrieve ETH/STRK price")?; - let strk_gas_price = (BigDecimal::new((*eth_gas_price).into(), decimals.into()) + let strk_gas_price = (BigDecimal::new(eth_gas_price.into(), decimals.into()) / BigDecimal::new(eth_strk_price.into(), decimals.into())) .as_bigint_and_exponent(); let strk_data_gas_price = (BigDecimal::new(avg_blob_base_fee.into(), decimals.into()) @@ -101,18 +153,17 @@ async fn update_gas_price(eth_client: &EthereumClient, l1_gas_provider: &GasPric l1_gas_provider.update_last_update_timestamp(); // Update block number separately to avoid holding the lock for too long - update_l1_block_metrics(eth_client, l1_gas_provider).await?; + update_l1_block_metrics(settlement_client.get_latest_block_number().await?, l1_block_metrics, l1_gas_provider) + .await?; Ok(()) } async fn update_l1_block_metrics( - eth_client: &EthereumClient, + block_number: u64, + 
l1_block_metrics: Arc, l1_gas_provider: &GasPriceProvider, ) -> anyhow::Result<()> { - // Get the latest block number - let latest_block_number = eth_client.get_latest_block_number().await?; - // Get the current gas price let current_gas_price = l1_gas_provider.get_gas_prices(); let eth_gas_price = current_gas_price.eth_l1_gas_price; @@ -121,8 +172,8 @@ async fn update_l1_block_metrics( // Update the metrics - eth_client.l1_block_metrics.l1_block_number.record(latest_block_number, &[]); - eth_client.l1_block_metrics.l1_gas_price_wei.record(eth_gas_price as u64, &[]); + l1_block_metrics.l1_block_number.record(block_number, &[]); + l1_block_metrics.l1_gas_price_wei.record(eth_gas_price as u64, &[]); // We're ignoring l1_gas_price_strk @@ -132,7 +183,9 @@ async fn update_l1_block_metrics( #[cfg(test)] mod eth_client_gas_price_worker_test { use super::*; - use crate::client::eth_client_getter_test::{create_ethereum_client, get_shared_anvil}; + use crate::eth::eth_client_getter_test::{create_ethereum_client, get_shared_anvil}; + use crate::eth::event::EthereumEventStream; + use crate::eth::EthereumClientConfig; use httpmock::{MockServer, Regex}; use mc_mempool::GasPriceProvider; use std::time::SystemTime; @@ -145,16 +198,19 @@ mod eth_client_gas_price_worker_test { let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); + let l1_block_metrics = L1BlockMetrics::register().expect("Failed to register L1 block metrics"); + // Spawn the gas_price_worker in a separate task let worker_handle: JoinHandle> = tokio::spawn({ let eth_client = eth_client.clone(); let l1_gas_provider = l1_gas_provider.clone(); async move { - gas_price_worker( - Arc::new(eth_client), + gas_price_worker::( + Arc::new(Box::new(eth_client)), l1_gas_provider, Duration::from_millis(200), ServiceContext::new_for_testing(), + Arc::new(l1_block_metrics), ) .await } @@ -188,8 +244,15 @@ mod eth_client_gas_price_worker_test { let eth_client = 
create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); + let l1_block_metrics = L1BlockMetrics::register().expect("Failed to register L1 block metrics"); + // Run the worker for a short time - let worker_handle = gas_price_worker_once(ð_client, &l1_gas_provider, Duration::from_millis(200)); + let worker_handle = gas_price_worker_once::( + Arc::new(Box::new(eth_client)), + &l1_gas_provider, + Duration::from_millis(200), + Arc::new(l1_block_metrics), + ); // Wait for the worker to complete worker_handle.await.expect("issue with the gas worker"); @@ -208,8 +271,15 @@ mod eth_client_gas_price_worker_test { l1_gas_provider.update_eth_l1_gas_price(20); l1_gas_provider.set_gas_price_sync_enabled(false); + let l1_block_metrics = L1BlockMetrics::register().expect("Failed to register L1 block metrics"); + // Run the worker for a short time - let worker_handle = gas_price_worker_once(ð_client, &l1_gas_provider, Duration::from_millis(200)); + let worker_handle = gas_price_worker_once::( + Arc::new(Box::new(eth_client)), + &l1_gas_provider, + Duration::from_millis(200), + Arc::new(l1_block_metrics), + ); // Wait for the worker to complete worker_handle.await.expect("issue with the gas worker"); @@ -228,8 +298,15 @@ mod eth_client_gas_price_worker_test { l1_gas_provider.update_eth_l1_data_gas_price(20); l1_gas_provider.set_data_gas_price_sync_enabled(false); + let l1_block_metrics = L1BlockMetrics::register().expect("Failed to register L1 block metrics"); + // Run the worker for a short time - let worker_handle = gas_price_worker_once(ð_client, &l1_gas_provider, Duration::from_millis(200)); + let worker_handle = gas_price_worker_once::( + Arc::new(Box::new(eth_client)), + &l1_gas_provider, + Duration::from_millis(200), + Arc::new(l1_block_metrics), + ); // Wait for the worker to complete worker_handle.await.expect("issue with the gas worker"); @@ -245,6 +322,7 @@ mod eth_client_gas_price_worker_test { let mock_server = 
MockServer::start(); let addr = format!("http://{}", mock_server.address()); let eth_client = create_ethereum_client(Some(&addr)); + let l1_block_metrics = L1BlockMetrics::register().expect("Failed to register L1 block metrics"); let mock = mock_server.mock(|when, then| { when.method("POST").path("/").json_body_obj(&serde_json::json!({ @@ -278,11 +356,12 @@ mod eth_client_gas_price_worker_test { let result = timeout( timeout_duration, - gas_price_worker( - Arc::new(eth_client), + gas_price_worker::( + Arc::new(Box::new(eth_client)), l1_gas_provider.clone(), Duration::from_millis(200), ServiceContext::new_for_testing(), + Arc::new(l1_block_metrics), ), ) .await; @@ -312,9 +391,16 @@ mod eth_client_gas_price_worker_test { let l1_gas_provider = GasPriceProvider::new(); l1_gas_provider.update_last_update_timestamp(); + let l1_block_metrics = L1BlockMetrics::register().expect("Failed to register L1 block metrics"); // Update gas prices - update_gas_price(ð_client, &l1_gas_provider).await.expect("Failed to update gas prices"); + update_gas_price::( + Arc::new(Box::new(eth_client)), + &l1_gas_provider, + Arc::new(l1_block_metrics), + ) + .await + .expect("Failed to update gas prices"); // Access the updated gas prices let updated_prices = l1_gas_provider.get_gas_prices(); diff --git a/crates/madara/client/eth/src/lib.rs b/crates/madara/client/settlement_client/src/lib.rs similarity index 54% rename from crates/madara/client/eth/src/lib.rs rename to crates/madara/client/settlement_client/src/lib.rs index 4e8675d38..3f0f0e867 100644 --- a/crates/madara/client/eth/src/lib.rs +++ b/crates/madara/client/settlement_client/src/lib.rs @@ -1,7 +1,9 @@ pub mod client; pub mod error; -pub mod l1_gas_price; -pub mod l1_messaging; +pub mod eth; +pub mod gas_price; +pub mod messaging; +pub mod starknet; pub mod state_update; pub mod sync; pub mod utils; diff --git a/crates/madara/client/settlement_client/src/messaging.rs b/crates/madara/client/settlement_client/src/messaging.rs new 
file mode 100644 index 000000000..ec991f4dd --- /dev/null +++ b/crates/madara/client/settlement_client/src/messaging.rs @@ -0,0 +1,407 @@ +use crate::client::{ClientTrait, ClientType}; +use alloy::primitives::B256; +use futures::{Stream, StreamExt}; +use mc_db::l1_db::LastSyncedEventBlock; +use mc_db::MadaraBackend; +use mc_mempool::{Mempool, MempoolProvider}; +use mp_utils::service::ServiceContext; +use starknet_api::core::{ChainId, ContractAddress, EntryPointSelector, Nonce}; +use starknet_api::transaction::{Calldata, L1HandlerTransaction, TransactionVersion}; +use starknet_types_core::felt::Felt; +use std::sync::Arc; +use tracing::{error, info}; + +#[derive(Clone, Debug)] +pub struct CommonMessagingEventData { + pub from: Felt, + pub to: Felt, + pub selector: Felt, + pub nonce: Felt, + pub payload: Vec, + pub fee: Option, + pub transaction_hash: Felt, + pub message_hash: Option, + pub block_number: u64, + pub event_index: Option, +} + +pub async fn sync( + settlement_client: Arc>>, + backend: Arc, + chain_id: ChainId, + mempool: Arc, + mut ctx: ServiceContext, +) -> anyhow::Result<()> +where + S: Stream>> + Send + 'static, +{ + info!("⟠ Starting L1 Messages Syncing..."); + + let last_synced_event_block = match backend.messaging_last_synced_l1_block_with_event() { + Ok(Some(blk)) => blk, + Ok(None) => { + unreachable!("Should never be None") + } + Err(e) => { + error!("⟠ Madara Messaging DB unavailable: {:?}", e); + return Err(e.into()); + } + }; + + let stream = settlement_client.get_messaging_stream(last_synced_event_block).await?; + let mut event_stream = Box::pin(stream); + + while let Some(Some(event_result)) = ctx.run_until_cancelled(event_stream.next()).await { + if let Some(event) = event_result { + let event_data = event?; + let tx = parse_handle_message_transaction(&event_data)?; + let tx_nonce = tx.nonce; + + // Skip if already processed + if backend.has_l1_messaging_nonce(tx_nonce)? 
{ + info!("Event already processed"); + return Ok(()); + } + + info!( + "Processing Message from block: {:?}, transaction_hash: {:?}, fromAddress: {:?}", + event_data.block_number, + format!("0x{}", event_data.transaction_hash.to_hex_string()), + format!("0x{}", event_data.transaction_hash.to_hex_string()), + ); + + // Check message hash and cancellation + let event_hash = settlement_client.get_messaging_hash(&event_data)?; + let converted_event_hash = match settlement_client.get_client_type() { + ClientType::ETH => B256::from_slice(event_hash.as_slice()).to_string(), + ClientType::STARKNET => Felt::from_bytes_be_slice(event_hash.as_slice()).to_hex_string(), + }; + info!("Checking for cancellation, event hash: {:?}", converted_event_hash); + + let cancellation_timestamp = settlement_client.get_l1_to_l2_message_cancellations(event_hash).await?; + if cancellation_timestamp != Felt::ZERO { + info!("Message was cancelled in block at timestamp: {:?}", cancellation_timestamp); + handle_cancelled_message(backend, tx_nonce)?; + return Ok(()); + } + + // Process message + match process_message(&backend, &event_data, &chain_id, mempool.clone()).await { + Ok(Some(tx_hash)) => { + info!( + "Message from block: {:?} submitted, transaction hash: {:?}", + event_data.block_number, tx_hash + ); + + let block_sent = + LastSyncedEventBlock::new(event_data.block_number, event_data.event_index.unwrap_or(0)); + backend.messaging_update_last_synced_l1_block_with_event(block_sent)?; + } + Ok(None) => {} + Err(e) => { + error!( + "Unexpected error while processing Message from block: {:?}, error: {:?}", + event_data.block_number, e + ); + return Err(e); + } + } + } + } + Ok(()) +} + +fn handle_cancelled_message(backend: Arc, nonce: Nonce) -> anyhow::Result<()> { + match backend.has_l1_messaging_nonce(nonce) { + Ok(false) => { + backend.set_l1_messaging_nonce(nonce)?; + } + Ok(true) => {} + Err(e) => { + error!("Unexpected DB error: {:?}", e); + return Err(e.into()); + } + } + Ok(()) +} + 
+pub fn parse_handle_message_transaction(event: &CommonMessagingEventData) -> anyhow::Result { + let calldata: Calldata = { + let mut calldata: Vec<_> = Vec::with_capacity(event.payload.len() + 1); + calldata.push(event.from); + calldata.extend(event.payload.clone()); + Calldata(Arc::new(calldata)) + }; + + Ok(L1HandlerTransaction { + nonce: Nonce(event.nonce), + contract_address: ContractAddress(event.to.try_into()?), + entry_point_selector: EntryPointSelector(event.selector), + calldata, + version: TransactionVersion(Felt::ZERO), + }) +} + +async fn process_message( + backend: &MadaraBackend, + event: &CommonMessagingEventData, + _chain_id: &ChainId, + mempool: Arc, +) -> anyhow::Result> { + let transaction = parse_handle_message_transaction(event)?; + let tx_nonce = transaction.nonce; + let fees = event.fee; + + // Ensure that L1 message has not been executed + match backend.has_l1_messaging_nonce(tx_nonce) { + Ok(false) => { + backend.set_l1_messaging_nonce(tx_nonce)?; + } + Ok(true) => { + tracing::debug!("⟠ Event already processed: {:?}", transaction); + return Ok(None); + } + Err(e) => { + error!("⟠ Unexpected DB error: {:?}", e); + return Err(e.into()); + } + }; + + let res = mempool.tx_accept_l1_handler(transaction.into(), fees.unwrap_or(0))?; + + Ok(Some(res.transaction_hash)) +} + +#[cfg(test)] +mod messaging_module_tests { + use super::*; + use crate::client::{DummyConfig, DummyStream, MockClientTrait}; + use futures::stream; + use mc_db::DatabaseService; + use mc_mempool::{GasPriceProvider, L1DataProvider, MempoolLimits}; + use mp_chain_config::ChainConfig; + use rstest::{fixture, rstest}; + use starknet_types_core::felt::Felt; + use std::time::Duration; + use tempfile::TempDir; + use tokio::time::timeout; + + // Helper function to create a mock event + fn create_mock_event(block_number: u64, nonce: u64) -> CommonMessagingEventData { + CommonMessagingEventData { + block_number, + transaction_hash: Felt::from(1), + event_index: Some(0), + from: 
Felt::from(123), + to: Felt::from(456), + selector: Felt::from(789), + payload: vec![Felt::from(1), Felt::from(2)], + nonce: Felt::from(nonce), + fee: Some(1000), + message_hash: None, + } + } + + struct MessagingTestRunner { + client: MockClientTrait, + db: Arc, + mempool: Arc, + ctx: ServiceContext, + chain_config: Arc, + } + + #[fixture] + async fn setup_messaging_tests() -> MessagingTestRunner { + // Set up chain info + let chain_config = Arc::new(ChainConfig::madara_test()); + + // Set up database paths + let temp_dir = TempDir::new().expect("issue while creating temporary directory"); + let base_path = temp_dir.path().join("data"); + let backup_dir = Some(temp_dir.path().join("backups")); + + // Initialize database service + let db = Arc::new( + DatabaseService::new(&base_path, backup_dir, false, chain_config.clone(), Default::default()) + .await + .expect("Failed to create database service"), + ); + + let l1_gas_setter = GasPriceProvider::new(); + let l1_data_provider: Arc = Arc::new(l1_gas_setter.clone()); + + let mempool = Arc::new(Mempool::new( + Arc::clone(db.backend()), + Arc::clone(&l1_data_provider), + MempoolLimits::for_testing(), + )); + + // Create a new context for mocking the static new() method + let ctx = MockClientTrait::new_context(); + ctx.expect().returning(|_| Ok(MockClientTrait::default())); + let mock_client = MockClientTrait::new(DummyConfig).await.expect("Unable to init new mock client"); + + let ctx = ServiceContext::new_for_testing(); + + MessagingTestRunner { client: mock_client, db, mempool, ctx, chain_config } + } + + #[rstest] + #[tokio::test] + async fn test_sync_processes_new_message( + #[future] setup_messaging_tests: MessagingTestRunner, + ) -> anyhow::Result<()> { + let MessagingTestRunner { mut client, db, mempool, ctx, chain_config } = setup_messaging_tests.await; + + // Setup mock event + let mock_event = create_mock_event(100, 1); + let event_clone = mock_event.clone(); + + let backend = db.backend(); + + // Setup mock 
for last synced block + backend.messaging_update_last_synced_l1_block_with_event(LastSyncedEventBlock::new(99, 0))?; + + // Mock get_messaging_stream + client + .expect_get_messaging_stream() + .times(1) + .returning(move |_| Ok(Box::pin(stream::iter(vec![Some(Ok(mock_event.clone()))])))); + + // Mock get_messaging_hash + client.expect_get_messaging_hash().times(1).returning(|_| Ok(vec![0u8; 32])); + + // Mock get_client_type + client.expect_get_client_type().times(1).returning(|| ClientType::ETH); + + // Mock get_l1_to_l2_message_cancellations + client.expect_get_l1_to_l2_message_cancellations().times(1).returning(|_| Ok(Felt::ZERO)); + + let client: Arc>> = + Arc::new(Box::new(client)); + + timeout( + Duration::from_secs(1), + sync(client, backend.clone(), chain_config.chain_id.clone(), mempool.clone(), ctx), + ) + .await??; + + // Verify the message was processed + assert!(backend.has_l1_messaging_nonce(Nonce(event_clone.nonce))?); + + Ok(()) + } + + #[rstest] + #[tokio::test] + async fn test_sync_handles_cancelled_message( + #[future] setup_messaging_tests: MessagingTestRunner, + ) -> anyhow::Result<()> { + let MessagingTestRunner { mut client, db, mempool, ctx, chain_config } = setup_messaging_tests.await; + + let backend = db.backend(); + + // Setup mock event + let mock_event = create_mock_event(100, 1); + let event_clone = mock_event.clone(); + + // Setup mock for last synced block + backend.messaging_update_last_synced_l1_block_with_event(LastSyncedEventBlock::new(99, 0))?; + + // Mock get_messaging_stream + client + .expect_get_messaging_stream() + .times(1) + .returning(move |_| Ok(Box::pin(stream::iter(vec![Some(Ok(mock_event.clone()))])))); + + // Mock get_messaging_hash + client.expect_get_messaging_hash().times(1).returning(|_| Ok(vec![0u8; 32])); + + // Mock get_client_type + client.expect_get_client_type().times(1).returning(|| ClientType::ETH); + + // Mock get_l1_to_l2_message_cancellations - return non-zero to indicate cancellation + 
client.expect_get_l1_to_l2_message_cancellations().times(1).returning(|_| Ok(Felt::from(12345))); + + let client: Arc>> = + Arc::new(Box::new(client)); + + timeout( + Duration::from_secs(1), + sync(client, backend.clone(), chain_config.chain_id.clone(), mempool.clone(), ctx), + ) + .await??; + + // Verify the cancelled message was handled correctly + assert!(backend.has_l1_messaging_nonce(Nonce(event_clone.nonce))?); + + Ok(()) + } + + #[rstest] + #[tokio::test] + async fn test_sync_skips_already_processed_message( + #[future] setup_messaging_tests: MessagingTestRunner, + ) -> anyhow::Result<()> { + let MessagingTestRunner { mut client, db, mempool, ctx, chain_config } = setup_messaging_tests.await; + + let backend = db.backend(); + + // Setup mock event + let mock_event = create_mock_event(100, 1); + + // Pre-set the nonce as processed + backend.set_l1_messaging_nonce(Nonce(mock_event.nonce))?; + + // Setup mock for last synced block + backend.messaging_update_last_synced_l1_block_with_event(LastSyncedEventBlock::new(99, 0))?; + + // Mock get_messaging_stream + client + .expect_get_messaging_stream() + .times(1) + .returning(move |_| Ok(Box::pin(stream::iter(vec![Some(Ok(mock_event.clone()))])))); + + // Mock get_messaging_hash - should not be called + client.expect_get_messaging_hash().times(0); + + let client: Arc>> = + Arc::new(Box::new(client)); + + timeout( + Duration::from_secs(1), + sync(client, backend.clone(), chain_config.chain_id.clone(), mempool.clone(), ctx), + ) + .await??; + + Ok(()) + } + + #[rstest] + #[tokio::test] + async fn test_sync_handles_stream_errors( + #[future] setup_messaging_tests: MessagingTestRunner, + ) -> anyhow::Result<()> { + let MessagingTestRunner { mut client, db, mempool, ctx, chain_config } = setup_messaging_tests.await; + + let backend = db.backend(); + + // Setup mock for last synced block + backend.messaging_update_last_synced_l1_block_with_event(LastSyncedEventBlock::new(99, 0))?; + + // Mock get_messaging_stream to 
return error + client + .expect_get_messaging_stream() + .times(1) + .returning(move |_| Ok(Box::pin(stream::iter(vec![Some(Err(anyhow::anyhow!("Stream error")))])))); + + let client: Arc>> = + Arc::new(Box::new(client)); + + let result = sync(client, backend.clone(), chain_config.chain_id.clone(), mempool.clone(), ctx).await; + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Stream error")); + + Ok(()) + } +} diff --git a/crates/madara/client/settlement_client/src/starknet/event.rs b/crates/madara/client/settlement_client/src/starknet/event.rs new file mode 100644 index 000000000..1a0230399 --- /dev/null +++ b/crates/madara/client/settlement_client/src/starknet/event.rs @@ -0,0 +1,377 @@ +use crate::messaging::CommonMessagingEventData; +use futures::Stream; +use log::error; +use starknet_core::types::{BlockId, EmittedEvent, EventFilter}; +use starknet_providers::jsonrpc::HttpTransport; +use starknet_providers::{JsonRpcClient, Provider}; +use starknet_types_core::felt::Felt; +use std::collections::HashSet; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; +use tokio::time::sleep; + +type FutureType = Pin, EventFilter)>> + Send>>; + +pub struct StarknetEventStream { + provider: Arc>, + filter: EventFilter, + processed_events: HashSet, + future: Option, + polling_interval: Duration, +} + +impl StarknetEventStream { + pub fn new(provider: Arc>, filter: EventFilter, polling_interval: Duration) -> Self { + Self { provider, filter, processed_events: HashSet::new(), future: None, polling_interval } + } +} + +impl Stream for StarknetEventStream { + type Item = Option>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.future.is_none() { + let provider = self.provider.clone(); + let filter = self.filter.clone(); + let processed_events = self.processed_events.clone(); + let polling_interval = self.polling_interval; + + async fn 
fetch_events( + provider: Arc>, + mut filter: EventFilter, + processed_events: HashSet, + polling_interval: Duration, + ) -> anyhow::Result<(Option, EventFilter)> { + // Adding sleep to introduce delay + sleep(polling_interval).await; + + let mut event_vec = Vec::new(); + let mut page_indicator = false; + let mut continuation_token: Option = None; + + while !page_indicator { + let events = provider + .get_events( + EventFilter { + from_block: filter.from_block, + to_block: filter.to_block, + address: filter.address, + keys: filter.keys.clone(), + }, + continuation_token.clone(), + 1000, + ) + .await?; + + event_vec.extend(events.events); + if let Some(token) = events.continuation_token { + continuation_token = Some(token); + } else { + page_indicator = true; + } + } + + let latest_block = provider.block_number().await?; + + for event in event_vec.clone() { + let event_nonce = event.data[1]; + if !processed_events.contains(&event_nonce) { + return Ok((Some(event), filter)); + } + } + + filter.from_block = filter.to_block; + filter.to_block = Some(BlockId::Number(latest_block)); + + Ok((None, filter)) + } + + let future = async move { + let (event, updated_filter) = + fetch_events(provider, filter, processed_events, polling_interval).await?; + Ok((event, updated_filter)) + }; + + self.future = Some(Box::pin(future)); + } + + match self.future.as_mut() { + Some(fut) => match fut.as_mut().poll(cx) { + Poll::Ready(result) => { + self.future = None; + match result { + Ok((Some(event), updated_filter)) => { + // Update the filter + self.filter = updated_filter; + // Insert the event nonce before returning + self.processed_events.insert(event.data[1]); + + let event_data = event + .block_number + .ok_or_else(|| anyhow::anyhow!("Unable to get block number from event")) + .map(|block_number| CommonMessagingEventData { + from: event.keys[2], + to: event.keys[3], + selector: event.data[0], + nonce: event.data[1], + payload: { + let mut payload_array = vec![]; + 
event.data.iter().skip(3).for_each(|data| { + payload_array.push(*data); + }); + payload_array + }, + fee: None, + transaction_hash: event.transaction_hash, + message_hash: Some(event.keys[1]), + block_number, + event_index: None, + }); + + Poll::Ready(Some(Some(event_data))) + } + Ok((None, updated_filter)) => { + // Update the filter even when no events are found + self.filter = updated_filter; + Poll::Ready(Some(None)) + } + Err(e) => Poll::Ready(Some(Some(Err(e)))), + } + } + Poll::Pending => Poll::Pending, + }, + None => { + // If the code comes here then this is an unexpected behaviour. + // Following scenarios can lead to this: + // - Not able to call the RPC and fetch events. + // - Connection Issues. + error!("Starknet Event Stream : Unable to fetch events from starknet stream. Restart Sequencer."); + Poll::Ready(Some(None)) + } + } + } +} + +#[cfg(test)] +mod starknet_event_stream_tests { + use super::*; + use futures::StreamExt; + use httpmock::prelude::*; + use httpmock::Mock; + use rstest::rstest; + use serde_json::json; + use std::str::FromStr; + use url::Url; + + struct MockStarknetServer { + server: MockServer, + } + + impl MockStarknetServer { + fn new() -> Self { + Self { server: MockServer::start() } + } + + fn url(&self) -> String { + self.server.base_url() + } + + fn mock_get_events(&self, events: Vec, continuation_token: Option<&str>) -> Mock { + self.server.mock(|when, then| { + when.method(POST).path("/").header("Content-Type", "application/json").matches(|req| { + let body = req.body.clone().unwrap(); + let body_str = std::str::from_utf8(body.as_slice()).unwrap_or_default(); + body_str.contains("starknet_getEvents") + }); + + then.status(200).json_body(json!({ + "jsonrpc": "2.0", + "id": 1, + "result": { + "events": events, + "continuation_token": continuation_token + } + })); + }) + } + + fn mock_block_number(&self, block_number: u64) -> Mock { + self.server.mock(|when, then| { + when.method(POST).path("/").matches(|req| { + let body = 
req.body.clone().unwrap(); + let body_str = std::str::from_utf8(body.as_slice()).unwrap_or_default(); + body_str.contains("starknet_blockNumber") + }); + + then.status(200).json_body(json!({ + "jsonrpc": "2.0", + "id": 1, + "result": block_number + })); + }) + } + + fn mock_error_response(&self, error_code: i64, error_message: &str) -> Mock { + self.server.mock(|when, then| { + when.method(POST).path("/"); + + then.status(200).json_body(json!({ + "jsonrpc": "2.0", + "id": 1, + "error": { + "code": error_code, + "message": error_message + } + })); + }) + } + } + + fn create_test_event(nonce: u64, block_number: u64) -> EmittedEvent { + EmittedEvent { + from_address: Default::default(), + transaction_hash: Felt::from_hex("0x1234").unwrap(), + block_number: Some(block_number), + block_hash: Some(Felt::from_hex("0x5678").unwrap()), + data: vec![ + Felt::from_hex("0x3333").unwrap(), // selector + Felt::from_hex(&format!("0x{:x}", nonce)).unwrap(), // nonce + Felt::from_hex("0x5555").unwrap(), // len + Felt::from_hex("0x6666").unwrap(), // payload[0] + Felt::from_hex("0x7777").unwrap(), // payload[1] + ], + keys: vec![ + Felt::from_hex("0x0001").unwrap(), // event key + Felt::from_hex("0x0001").unwrap(), // message_hash + Felt::from_hex("0x1111").unwrap(), // from + Felt::from_hex("0x2222").unwrap(), // to + ], + } + } + + fn setup_stream(mock_server: &MockStarknetServer) -> StarknetEventStream { + let provider = JsonRpcClient::new(HttpTransport::new(Url::from_str(&mock_server.url()).unwrap())); + + StarknetEventStream::new( + Arc::new(provider), + EventFilter { + from_block: Some(BlockId::Number(0)), + to_block: Some(BlockId::Number(100)), + address: Some(Felt::from_hex("0x1").unwrap()), + keys: Some(vec![]), + }, + Duration::from_secs(1), + ) + } + + #[tokio::test] + #[rstest] + async fn test_single_event() { + let mock_server = MockStarknetServer::new(); + + // Setup mocks + let test_event = create_test_event(1, 100); + let events_mock = 
mock_server.mock_get_events(vec![test_event], None); + let block_mock = mock_server.mock_block_number(101); + + let mut stream = Box::pin(setup_stream(&mock_server)); + + if let Some(Some(Ok(event_data))) = stream.next().await { + assert_eq!(event_data.block_number, 100); + assert!(event_data.message_hash.is_some()); + assert_eq!(event_data.payload.len(), 2); + } else { + panic!("Expected successful event"); + } + + // Verify mocks were called + events_mock.assert(); + events_mock.assert(); + block_mock.assert(); + } + + #[tokio::test] + #[rstest] + async fn test_error_handling() { + let mock_server = MockStarknetServer::new(); + + let error_mock = mock_server.mock_error_response(-32000, "Internal error"); + + let mut stream = Box::pin(setup_stream(&mock_server)); + + match stream.next().await { + Some(Some(Err(e))) => { + assert!(e.to_string().contains("Internal error")); + } + _ => panic!("Expected error"), + } + + error_mock.assert(); + } + + #[tokio::test] + #[rstest] + async fn test_empty_events() { + let mock_server = MockStarknetServer::new(); + + let events_mock = mock_server.mock_get_events(vec![], None); + let block_mock = mock_server.mock_block_number(100); + + let mut stream = Box::pin(setup_stream(&mock_server)); + + match stream.next().await { + Some(None) => { /* Expected */ } + _ => panic!("Expected None for empty events"), + } + + events_mock.assert(); + block_mock.assert(); + } + + #[tokio::test] + #[rstest] + async fn test_multiple_events_in_single_block() { + let mock_server = MockStarknetServer::new(); + + // Create two events with different nonces but same block number + let test_event1 = create_test_event(1, 100); + let test_event2 = create_test_event(2, 100); + + // Setup mocks for events and block number + let events_mock = mock_server.mock_get_events(vec![test_event1.clone(), test_event2.clone()], None); + let block_mock = mock_server.mock_block_number(101); + + let mut stream = Box::pin(setup_stream(&mock_server)); + + if let 
Some(Some(Ok(event_data1))) = stream.next().await { + assert_eq!(event_data1.block_number, 100); + assert_eq!(event_data1.nonce, Felt::from_hex("0x1").unwrap()); + assert_eq!(event_data1.payload.len(), 2); + assert_eq!(event_data1.transaction_hash, test_event1.transaction_hash); + } else { + panic!("Expected first event"); + } + + if let Some(Some(Ok(event_data2))) = stream.next().await { + assert_eq!(event_data2.block_number, 100); + assert_eq!(event_data2.nonce, Felt::from_hex("0x2").unwrap()); + assert_eq!(event_data2.payload.len(), 2); + assert_eq!(event_data2.transaction_hash, test_event2.transaction_hash); + } else { + panic!("Expected second event"); + } + + // Verify that there are no more events + match stream.next().await { + Some(None) => { /* Expected */ } + _ => panic!("Expected None after processing all events"), + } + + // Verify mocks were called + events_mock.assert(); + block_mock.assert(); + } +} diff --git a/crates/madara/client/settlement_client/src/starknet/mod.rs b/crates/madara/client/settlement_client/src/starknet/mod.rs new file mode 100644 index 000000000..5361272d7 --- /dev/null +++ b/crates/madara/client/settlement_client/src/starknet/mod.rs @@ -0,0 +1,853 @@ +use crate::client::{ClientTrait, ClientType}; +use crate::gas_price::L1BlockMetrics; +use crate::messaging::CommonMessagingEventData; +use crate::starknet::event::StarknetEventStream; +use crate::state_update::{update_l1, StateUpdate}; +use anyhow::{anyhow, bail}; +use async_trait::async_trait; +use bigdecimal::ToPrimitive; +use mc_db::l1_db::LastSyncedEventBlock; +use mc_db::MadaraBackend; +use mp_utils::service::ServiceContext; +use starknet_core::types::{BlockId, BlockTag, EmittedEvent, EventFilter, FunctionCall}; +use starknet_core::utils::get_selector_from_name; +use starknet_crypto::poseidon_hash_many; +use starknet_providers::jsonrpc::HttpTransport; +use starknet_providers::{JsonRpcClient, Provider}; +use starknet_types_core::felt::Felt; +use std::sync::atomic::{AtomicU64, 
Ordering}; +use std::sync::Arc; +use std::time::Duration; +use tokio::time::sleep; +use url::Url; + +pub mod event; +#[cfg(test)] +pub mod utils; + +#[derive(Debug)] +pub struct StarknetClient { + pub provider: Arc>, + pub l2_core_contract: Felt, + pub processed_update_state_block: AtomicU64, +} + +#[derive(Clone)] +pub struct StarknetClientConfig { + pub url: Url, + pub l2_contract_address: Felt, +} + +impl Clone for StarknetClient { + fn clone(&self) -> Self { + StarknetClient { + provider: Arc::clone(&self.provider), + l2_core_contract: self.l2_core_contract, + processed_update_state_block: AtomicU64::new(self.processed_update_state_block.load(Ordering::Relaxed)), + } + } +} + +// TODO : Remove github refs after implementing the zaun imports +// Imp ⚠️ : zaun is not yet updated with latest app chain core contract implementations +// For this reason we are adding our own call implementations. +#[async_trait] +impl ClientTrait for StarknetClient { + type Config = StarknetClientConfig; + + fn get_client_type(&self) -> ClientType { + ClientType::STARKNET + } + + async fn new(config: Self::Config) -> anyhow::Result + where + Self: Sized, + { + let provider = JsonRpcClient::new(HttpTransport::new(config.url)); + // Check if l2 contract exists : + // If contract is not there this will error out. + provider.get_class_at(BlockId::Tag(BlockTag::Latest), config.l2_contract_address).await?; + Ok(Self { + provider: Arc::new(provider), + l2_core_contract: config.l2_contract_address, + processed_update_state_block: AtomicU64::new(0), // Keeping this as 0 initially when client is initialized. + }) + } + + async fn get_latest_block_number(&self) -> anyhow::Result { + let block_number = self.provider.block_number().await?; + Ok(block_number) + } + + async fn get_last_event_block_number(&self) -> anyhow::Result { + let latest_block = self.get_latest_block_number().await?; + // If block on l2 is not greater than or equal to 6000 we will consider the last block to 0. 
+ let last_block = if latest_block <= 6000 { 0 } else { latest_block - 6000 }; + let last_events = self + .get_events( + BlockId::Number(last_block), + BlockId::Number(latest_block), + self.l2_core_contract, + // taken from : https://github.com/keep-starknet-strange/piltover/blob/main/src/appchain.cairo#L102 + vec![get_selector_from_name("LogStateUpdate")?], + ) + .await?; + + let last_update_state_event = last_events.last(); + match last_update_state_event { + Some(event) => { + /* + GitHub Ref : https://github.com/keep-starknet-strange/piltover/blob/main/src/appchain.cairo#L101 + Event description : + ------------------ + #[derive(Drop, starknet::Event)] + struct LogStateUpdate { + state_root: felt252, + block_number: felt252, + block_hash: felt252, + } + */ + if event.data.len() != 3 { + return Err(anyhow!("Event response invalid !!")); + } + // Block number management in case of pending block number events. + match event.block_number { + Some(block_number) => Ok(block_number), + None => Ok(self.get_latest_block_number().await? + 1), + } + } + None => { + bail!("No event found") + } + } + } + + async fn get_last_verified_block_number(&self) -> anyhow::Result { + // Block Number index in call response : 1 + Ok(u64::try_from(self.get_state_call().await?[1])?) 
+ } + + async fn get_last_verified_state_root(&self) -> anyhow::Result { + // State Root index in call response : 0 + Ok(self.get_state_call().await?[0]) + } + + async fn get_last_verified_block_hash(&self) -> anyhow::Result { + // Block Hash index in call response : 2 + Ok(self.get_state_call().await?[2]) + } + + async fn get_initial_state(&self) -> anyhow::Result { + let block_number = self.get_last_verified_block_number().await?; + let block_hash = self.get_last_verified_block_hash().await?; + let global_root = self.get_last_verified_state_root().await?; + + Ok(StateUpdate { global_root, block_number, block_hash }) + } + + async fn listen_for_update_state_events( + &self, + backend: Arc, + mut ctx: ServiceContext, + l1_block_metrics: Arc, + ) -> anyhow::Result<()> { + while let Some(events) = ctx + .run_until_cancelled(async { + let latest_block = self.get_latest_block_number().await?; + let selector = get_selector_from_name("LogStateUpdate")?; + + self.get_events( + BlockId::Number(latest_block), + BlockId::Number(latest_block), + self.l2_core_contract, + vec![selector], + ) + .await + }) + .await + { + let events_fetched = events?; + if let Some(event) = events_fetched.last() { + let data = event; + let block_number = data.data[1].to_u64().ok_or(anyhow!("Block number conversion failed"))?; + + let current_processed = self.processed_update_state_block.load(Ordering::Relaxed); + if current_processed < block_number { + let formatted_event = + StateUpdate { block_number, global_root: data.data[0], block_hash: data.data[2] }; + update_l1(&backend, formatted_event, l1_block_metrics.clone())?; + self.processed_update_state_block.store(block_number, Ordering::Relaxed); + } + } + + sleep(Duration::from_millis(100)).await; + } + Ok(()) + } + + // We are returning here (0,0) because we are assuming that + // the L3s will have zero gas prices. for any transaction. + // So that's why we will keep the prices as 0 returning from + // our settlement client. 
+ async fn get_gas_prices(&self) -> anyhow::Result<(u128, u128)> { + Ok((0, 0)) + } + + fn get_messaging_hash(&self, event: &CommonMessagingEventData) -> anyhow::Result> { + Ok(poseidon_hash_many(&self.event_to_felt_array(event)).to_bytes_be().to_vec()) + } + + async fn get_l1_to_l2_message_cancellations(&self, msg_hash: Vec) -> anyhow::Result { + let call_res = self + .provider + .call( + FunctionCall { + contract_address: self.l2_core_contract, + // No get_message_cancellation function in pilt over as of now + entry_point_selector: get_selector_from_name("l1_to_l2_message_cancellations")?, + calldata: vec![Felt::from_bytes_be_slice(msg_hash.as_slice())], + }, + BlockId::Tag(BlockTag::Pending), + ) + .await?; + // Ensure correct read call : u256 (0, 0) + if call_res.len() != 2 { + return Err(anyhow!( + "Call response invalid : l1_to_l2_message_cancellations should return only 2 values !!" + )); + } + Ok(call_res[0]) + } + + type StreamType = StarknetEventStream; + async fn get_messaging_stream( + &self, + last_synced_event_block: LastSyncedEventBlock, + ) -> anyhow::Result { + let filter = EventFilter { + from_block: Some(BlockId::Number(last_synced_event_block.block_number)), + to_block: Some(BlockId::Number(self.get_latest_block_number().await?)), + address: Some(self.l2_core_contract), + keys: Some(vec![vec![get_selector_from_name("MessageSent")?]]), + }; + Ok(StarknetEventStream::new(self.provider.clone(), filter, Duration::from_secs(1))) + } +} + +impl StarknetClient { + async fn get_events( + &self, + from_block: BlockId, + to_block: BlockId, + contract_address: Felt, + keys: Vec, + ) -> anyhow::Result> { + let mut event_vec = Vec::new(); + let mut page_indicator = false; + let mut continuation_token: Option = None; + + while !page_indicator { + let events = self + .provider + .get_events( + EventFilter { + from_block: Some(from_block), + to_block: Some(to_block), + address: Some(contract_address), + keys: Some(vec![keys.clone()]), + }, + 
continuation_token.clone(), + 1000, + ) + .await?; + + event_vec.extend(events.events); + if let Some(token) = events.continuation_token { + continuation_token = Some(token); + } else { + page_indicator = true; + } + } + + Ok(event_vec) + } + + fn event_to_felt_array(&self, event: &CommonMessagingEventData) -> Vec { + let mut felt_vec = vec![event.from, event.to, event.selector, event.nonce]; + felt_vec.push(Felt::from(event.payload.len())); + event.payload.clone().into_iter().for_each(|felt| { + felt_vec.push(felt); + }); + + felt_vec + } + + pub async fn get_state_call(&self) -> anyhow::Result> { + let call_res = self + .provider + .call( + FunctionCall { + contract_address: self.l2_core_contract, + /* + GitHub Ref : https://github.com/keep-starknet-strange/piltover/blob/main/src/state/component.cairo#L59 + Function Call response : (StateRoot, BlockNumber, BlockHash) + */ + entry_point_selector: get_selector_from_name("get_state")?, + calldata: vec![], + }, + BlockId::Tag(BlockTag::Pending), + ) + .await?; + if call_res.len() != 3 { + return Err(anyhow!("Call response invalid !!")); + } + Ok(call_res) + } +} + +#[cfg(test)] +pub mod starknet_client_tests { + use crate::client::ClientTrait; + use crate::starknet::utils::{prepare_starknet_client_test, send_state_update, MADARA_PORT}; + use crate::starknet::{StarknetClient, StarknetClientConfig}; + use crate::state_update::StateUpdate; + use serial_test::serial; + use starknet_accounts::ConnectedAccount; + use starknet_core::types::BlockId; + use starknet_core::types::MaybePendingBlockWithTxHashes::{Block, PendingBlock}; + use starknet_providers::jsonrpc::HttpTransport; + use starknet_providers::ProviderError::StarknetError; + use starknet_providers::{JsonRpcClient, Provider}; + use starknet_types_core::felt::Felt; + use std::str::FromStr; + use std::time::Duration; + use tokio::time::sleep; + use url::Url; + + #[serial] + #[tokio::test] + async fn fail_create_new_client_contract_does_not_exists() -> 
anyhow::Result<()> { + prepare_starknet_client_test().await?; + let starknet_client = StarknetClient::new(StarknetClientConfig { + url: Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str())?, + l2_contract_address: Felt::from_str("0xdeadbeef")?, + }) + .await; + assert!(starknet_client.is_err(), "Should fail to create a new client"); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn create_new_client_contract_exists_starknet_client() -> anyhow::Result<()> { + let (_, deployed_address, _madara) = prepare_starknet_client_test().await?; + let starknet_client = StarknetClient::new(StarknetClientConfig { + url: Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str())?, + l2_contract_address: deployed_address, + }) + .await; + assert!(starknet_client.is_ok(), "Should not fail to create a new client"); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn get_last_event_block_number_works_starknet_client() -> anyhow::Result<()> { + let (account, deployed_address, _madara) = prepare_starknet_client_test().await?; + let starknet_client = StarknetClient::new(StarknetClientConfig { + url: Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str())?, + l2_contract_address: deployed_address, + }) + .await?; + + // sending state updates : + send_state_update( + &account, + deployed_address, + StateUpdate { + block_number: 99, + global_root: Felt::from_hex("0xdeadbeef")?, + block_hash: Felt::from_hex("0xdeadbeef")?, + }, + ) + .await?; + let last_event_block_number = send_state_update( + &account, + deployed_address, + StateUpdate { + block_number: 100, + global_root: Felt::from_hex("0xdeadbeef")?, + block_hash: Felt::from_hex("0xdeadbeef")?, + }, + ) + .await?; + + poll_on_block_completion(last_event_block_number, account.provider(), 100).await?; + + let latest_event_block_number = starknet_client.get_last_event_block_number().await?; + assert_eq!(latest_event_block_number, last_event_block_number, "Latest event should have block number 100"); + 
Ok(()) + } + + #[serial] + #[tokio::test] + async fn get_last_verified_block_hash_works_starknet_client() -> anyhow::Result<()> { + let (account, deployed_address, _madara) = prepare_starknet_client_test().await?; + let starknet_client = StarknetClient::new(StarknetClientConfig { + url: Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str())?, + l2_contract_address: deployed_address, + }) + .await?; + + // sending state updates : + let block_hash_event = Felt::from_hex("0xdeadbeef")?; + let global_root_event = Felt::from_hex("0xdeadbeef")?; + let block_number = send_state_update( + &account, + deployed_address, + StateUpdate { block_number: 100, global_root: global_root_event, block_hash: block_hash_event }, + ) + .await?; + poll_on_block_completion(block_number, account.provider(), 100).await?; + + let last_verified_block_hash = starknet_client.get_last_verified_block_hash().await?; + assert_eq!(last_verified_block_hash, block_hash_event, "Block hash should match"); + + Ok(()) + } + + #[serial] + #[tokio::test] + async fn get_last_state_root_works_starknet_client() -> anyhow::Result<()> { + let (account, deployed_address, _madara) = prepare_starknet_client_test().await?; + let starknet_client = StarknetClient::new(StarknetClientConfig { + url: Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str())?, + l2_contract_address: deployed_address, + }) + .await?; + + // sending state updates : + let block_hash_event = Felt::from_hex("0xdeadbeef")?; + let global_root_event = Felt::from_hex("0xdeadbeef")?; + let block_number = send_state_update( + &account, + deployed_address, + StateUpdate { block_number: 100, global_root: global_root_event, block_hash: block_hash_event }, + ) + .await?; + poll_on_block_completion(block_number, account.provider(), 100).await?; + + let last_verified_state_root = starknet_client.get_last_verified_state_root().await?; + assert_eq!(last_verified_state_root, global_root_event, "Last state root should match"); + + Ok(()) + } + 
+ #[serial] + #[tokio::test] + async fn get_last_verified_block_number_works_starknet_client() -> anyhow::Result<()> { + let (account, deployed_address, _madara) = prepare_starknet_client_test().await?; + let starknet_client = StarknetClient::new(StarknetClientConfig { + url: Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str())?, + l2_contract_address: deployed_address, + }) + .await?; + + // sending state updates : + let data_felt = Felt::from_hex("0xdeadbeef")?; + let block_number = 100; + let event_block_number = send_state_update( + &account, + deployed_address, + StateUpdate { block_number, global_root: data_felt, block_hash: data_felt }, + ) + .await?; + poll_on_block_completion(event_block_number, account.provider(), 100).await?; + + let last_verified_block_number = starknet_client.get_last_verified_block_number().await?; + assert_eq!(last_verified_block_number, block_number, "Last verified block should match"); + + Ok(()) + } + + const RETRY_DELAY: Duration = Duration::from_millis(100); + + pub async fn poll_on_block_completion( + block_number: u64, + provider: &JsonRpcClient, + max_retries: u64, + ) -> anyhow::Result<()> { + for try_count in 0..=max_retries { + match provider.get_block_with_tx_hashes(BlockId::Number(block_number)).await { + Ok(Block(_)) => { + return Ok(()); + } + Ok(PendingBlock(_)) | Err(StarknetError(starknet_core::types::StarknetError::BlockNotFound)) => { + if try_count == max_retries { + return Err(anyhow::anyhow!("Max retries reached while polling for block {}", block_number)); + } + sleep(RETRY_DELAY).await; + } + Err(e) => { + return Err(anyhow::anyhow!("Provider error while polling block {}: {}", block_number, e)); + } + } + } + + // This line should never be reached due to the return in the loop + Err(anyhow::anyhow!("Max retries reached while polling for block {}", block_number)) + } +} + +#[cfg(test)] +mod l2_messaging_test { + use crate::client::ClientTrait; + use crate::messaging::sync; + use 
crate::starknet::utils::{ + cancel_messaging_event, fire_messaging_event, prepare_starknet_client_messaging_test, MadaraProcess, + StarknetAccount, MADARA_PORT, + }; + use crate::starknet::{StarknetClient, StarknetClientConfig}; + use mc_db::DatabaseService; + use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; + use mp_chain_config::ChainConfig; + use mp_utils::service::ServiceContext; + use rstest::{fixture, rstest}; + use starknet_api::core::Nonce; + use starknet_types_core::felt::Felt; + use std::sync::Arc; + use std::time::Duration; + use tempfile::TempDir; + use tracing_test::traced_test; + use url::Url; + + struct TestRunnerStarknet { + #[allow(dead_code)] + madara: MadaraProcess, // Not used but needs to stay in scope otherwise it will be dropped + account: StarknetAccount, + chain_config: Arc, + db_service: Arc, + deployed_address: Felt, + starknet_client: StarknetClient, + mempool: Arc, + } + + #[fixture] + async fn setup_test_env_starknet() -> TestRunnerStarknet { + let (account, deployed_contract_address, madara) = prepare_starknet_client_messaging_test().await.unwrap(); + + // Set up chain info + let chain_config = Arc::new(ChainConfig::madara_test()); + + // Set up database paths + let temp_dir = TempDir::new().expect("issue while creating temporary directory"); + let base_path = temp_dir.path().join("data"); + let backup_dir = Some(temp_dir.path().join("backups")); + + // Initialize database service + let db = Arc::new( + DatabaseService::new(&base_path, backup_dir, false, chain_config.clone(), Default::default()) + .await + .expect("Failed to create database service"), + ); + + let starknet_client = StarknetClient::new(StarknetClientConfig { + url: Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str()).unwrap(), + l2_contract_address: deployed_contract_address, + }) + .await + .unwrap(); + + let l1_gas_setter = GasPriceProvider::new(); + let l1_data_provider: Arc = Arc::new(l1_gas_setter.clone()); + + let mempool = 
Arc::new(Mempool::new( + Arc::clone(db.backend()), + Arc::clone(&l1_data_provider), + MempoolLimits::for_testing(), + )); + + TestRunnerStarknet { + madara, + account, + chain_config, + db_service: db, + deployed_address: deployed_contract_address, + starknet_client, + mempool, + } + } + + #[rstest] + #[traced_test] + #[tokio::test] + async fn e2e_test_basic_workflow_starknet( + #[future] setup_test_env_starknet: TestRunnerStarknet, + ) -> anyhow::Result<()> { + // Initial Setup + // ================================== + let TestRunnerStarknet { + madara: _madara, + account, + chain_config, + db_service: db, + deployed_address: deployed_contract_address, + starknet_client, + mempool, + } = setup_test_env_starknet.await; + + // Start worker handle + // ================================== + let worker_handle = { + let db = Arc::clone(&db); + tokio::spawn(async move { + sync( + Arc::new(Box::new(starknet_client)), + Arc::clone(db.backend()), + chain_config.chain_id.clone(), + mempool, + ServiceContext::new_for_testing(), + ) + .await + }) + }; + + // Firing the event + let fire_event_block_number = fire_messaging_event(&account, deployed_contract_address).await?; + tokio::time::sleep(Duration::from_secs(10)).await; + + // Log asserts + // =========== + assert!(logs_contain("fromAddress: \"0x07484e8e3af210b2ead47fa08c96f8d18b616169b350a8b75fe0dc4d2e01d493\"")); + // hash calculated in the contract : 0x210c8d7fdedf3e9d775ba12b12da86ea67878074a21b625e06dac64d5838ad0 + // expecting the same in logs + assert!(logs_contain("event hash: \"0x210c8d7fdedf3e9d775ba12b12da86ea67878074a21b625e06dac64d5838ad0\"")); + + // Assert that the event is well stored in db + let last_block = + db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); + assert_eq!(last_block.block_number, fire_event_block_number); + let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); + 
assert!(db.backend().has_l1_messaging_nonce(nonce)?); + + // Cancelling worker + worker_handle.abort(); + Ok(()) + } + + #[rstest] + #[traced_test] + #[tokio::test] + // This test is redundant now as the event poller will not return the same + // event twice with same nonce that's why added ignore here. + #[ignore] + async fn e2e_test_already_processed_event_starknet( + #[future] setup_test_env_starknet: TestRunnerStarknet, + ) -> anyhow::Result<()> { + // Initial Setup + // ================================== + let TestRunnerStarknet { + madara: _madara, + account, + chain_config, + db_service: db, + deployed_address: deployed_contract_address, + starknet_client, + mempool, + } = setup_test_env_starknet.await; + + // Start worker handle + // ================================== + let worker_handle = { + let db = Arc::clone(&db); + tokio::spawn(async move { + sync( + Arc::new(Box::new(starknet_client)), + Arc::clone(db.backend()), + chain_config.chain_id.clone(), + mempool, + ServiceContext::new_for_testing(), + ) + .await + }) + }; + + // Firing the event + let fire_event_block_number = fire_messaging_event(&account, deployed_contract_address).await?; + tokio::time::sleep(Duration::from_secs(10)).await; + + // Log asserts + // =========== + assert!(logs_contain("fromAddress: \"0x07484e8e3af210b2ead47fa08c96f8d18b616169b350a8b75fe0dc4d2e01d493\"")); + // hash calculated in the contract : 0x210c8d7fdedf3e9d775ba12b12da86ea67878074a21b625e06dac64d5838ad0 + // expecting the same in logs + assert!(logs_contain("event hash: \"0x210c8d7fdedf3e9d775ba12b12da86ea67878074a21b625e06dac64d5838ad0\"")); + + // Assert that the event is well stored in db + let last_block = + db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); + assert_eq!(last_block.block_number, fire_event_block_number); + let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); + 
assert!(db.backend().has_l1_messaging_nonce(nonce)?); + + // Firing the event second time + fire_messaging_event(&account, deployed_contract_address).await?; + tokio::time::sleep(Duration::from_secs(15)).await; + // Assert that the event was processed in the earlier block only, not in the latest block. + assert_eq!( + last_block.block_number, + db.backend() + .messaging_last_synced_l1_block_with_event() + .expect("failed to retrieve block") + .unwrap() + .block_number + ); + assert!(logs_contain("Event already processed")); + + // Cancelling worker + worker_handle.abort(); + Ok(()) + } + + #[rstest] + #[traced_test] + #[tokio::test] + async fn e2e_test_message_canceled_starknet( + #[future] setup_test_env_starknet: TestRunnerStarknet, + ) -> anyhow::Result<()> { + // Initial Setup + // ================================== + let TestRunnerStarknet { + madara: _madara, + account, + chain_config, + db_service: db, + deployed_address: deployed_contract_address, + starknet_client, + mempool, + } = setup_test_env_starknet.await; + + // Start worker handle + // ================================== + let worker_handle = { + let db = Arc::clone(&db); + tokio::spawn(async move { + sync( + Arc::new(Box::new(starknet_client)), + Arc::clone(db.backend()), + chain_config.chain_id.clone(), + mempool, + ServiceContext::new_for_testing(), + ) + .await + }) + }; + + cancel_messaging_event(&account, deployed_contract_address).await?; + // Firing the cancelled event + fire_messaging_event(&account, deployed_contract_address).await?; + tokio::time::sleep(Duration::from_secs(15)).await; + + let last_block = + db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); + assert_eq!(last_block.block_number, 0); + let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); + // cancelled message nonce should be inserted to avoid reprocessing + assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); + 
assert!(logs_contain("Message was cancelled in block at timestamp: 0x66b4f105")); + + // Cancelling worker + worker_handle.abort(); + Ok(()) + } +} + +#[cfg(test)] +mod starknet_client_event_subscription_test { + use crate::client::ClientTrait; + use crate::gas_price::L1BlockMetrics; + use crate::starknet::event::StarknetEventStream; + use crate::starknet::utils::{prepare_starknet_client_test, send_state_update, MADARA_PORT}; + use crate::starknet::{StarknetClient, StarknetClientConfig}; + use crate::state_update::{state_update_worker, StateUpdate}; + use mc_db::DatabaseService; + use mp_chain_config::ChainConfig; + use mp_utils::service::ServiceContext; + use rstest::rstest; + use starknet_types_core::felt::Felt; + use std::str::FromStr; + use std::sync::Arc; + use std::time::Duration; + use tempfile::TempDir; + use url::Url; + + #[rstest] + #[tokio::test] + async fn listen_and_update_state_when_event_fired_starknet_client() -> anyhow::Result<()> { + // Setting up the DB and l1 block metrics + // ================================================ + + let chain_info = Arc::new(ChainConfig::madara_test()); + + // Set up database paths + let temp_dir = TempDir::new().expect("issue while creating temporary directory"); + let base_path = temp_dir.path().join("data"); + let backup_dir = Some(temp_dir.path().join("backups")); + + // Initialize database service + let db = Arc::new( + DatabaseService::new(&base_path, backup_dir, false, chain_info.clone(), Default::default()) + .await + .expect("Failed to create database service"), + ); + + // Making Starknet client and start worker + // ================================================ + let (account, deployed_address, _madara) = prepare_starknet_client_test().await?; + + let starknet_client = StarknetClient::new(StarknetClientConfig { + url: Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str())?, + l2_contract_address: deployed_address, + }) + .await?; + + let l1_block_metrics = L1BlockMetrics::register()?; + + 
let listen_handle = { + let db = Arc::clone(&db); + tokio::spawn(async move { + state_update_worker::( + Arc::clone(db.backend()), + Arc::new(Box::new(starknet_client)), + ServiceContext::new_for_testing(), + Arc::new(l1_block_metrics), + ) + .await + .expect("Failed to init state update worker.") + }) + }; + + // Firing the state update event + send_state_update( + &account, + deployed_address, + StateUpdate { + block_number: 100, + global_root: Felt::from_str("0xbeef")?, + block_hash: Felt::from_str("0xbeef")?, + }, + ) + .await?; + + // Wait for this update to be registered in the DB. Approx 10 secs + tokio::time::sleep(Duration::from_secs(10)).await; + + // Verify the block number + let block_in_db = + db.backend().get_l1_last_confirmed_block().expect("Failed to get L2 last confirmed block number"); + + listen_handle.abort(); + assert_eq!(block_in_db, Some(100), "Block in DB does not match expected L3 block number"); + Ok(()) + } +} diff --git a/crates/madara/client/settlement_client/src/starknet/utils.rs b/crates/madara/client/settlement_client/src/starknet/utils.rs new file mode 100644 index 000000000..efe37a603 --- /dev/null +++ b/crates/madara/client/settlement_client/src/starknet/utils.rs @@ -0,0 +1,259 @@ +use crate::state_update::StateUpdate; +use assert_matches::assert_matches; +use starknet_accounts::{Account, ConnectedAccount, ExecutionEncoding, SingleOwnerAccount}; +use starknet_core::types::contract::SierraClass; +use starknet_core::types::{BlockId, BlockTag, Call, TransactionReceipt, TransactionReceiptWithBlockInfo}; +use starknet_core::utils::get_selector_from_name; +use starknet_providers::jsonrpc::HttpTransport; +use starknet_providers::{JsonRpcClient, Provider, ProviderError}; +use starknet_signers::{LocalWallet, SigningKey}; +use starknet_types_core::felt::Felt; +use std::future::Future; +use std::net::TcpStream; +use std::path::PathBuf; +use std::process::{Child, Command}; +use std::str::FromStr; +use std::sync::Arc; +use std::thread; + +use 
m_cairo_test_contracts::{APPCHAIN_CONTRACT_SIERRA, MESSAGING_CONTRACT_SIERRA}; +use std::time::Duration; +use url::Url; + +pub const DEPLOYER_ADDRESS: &str = "0x055be462e718c4166d656d11f89e341115b8bc82389c3762a10eade04fcb225d"; +pub const DEPLOYER_PRIVATE_KEY: &str = "0x077e56c6dc32d40a67f6f7e6625c8dc5e570abe49c0a24e9202e4ae906abcc07"; +pub const UDC_ADDRESS: &str = "0x041a78e741e5af2fec34b695679bc6891742439f7afb8484ecd7766661ad02bf"; +pub const MADARA_PORT: &str = "19944"; +pub const MADARA_BINARY_PATH: &str = "../../../../test-artifacts/madara"; +pub const MADARA_CONFIG_PATH: &str = "../../../../configs/presets/devnet.yaml"; + +// starkli class-hash crates/client/settlement_client/src/starknet/test_contracts/appchain_test.casm.json +pub const APPCHAIN_CONTRACT_CASM_HASH: &str = "0x07f36e830605ddeb7c4c094639b628de297cbf61f45385b1fc3231029922b30b"; +// starkli class-hash crates/client/settlement_client/src/starknet/test_contracts/messaging_test.casm.json +pub const MESSAGING_CONTRACT_CASM_HASH: &str = "0x077de37b708f9abe01c1a797856398c5e1e5dfde8213f884668fa37b13d77e30"; + +pub type StarknetAccount = SingleOwnerAccount, LocalWallet>; +pub type TransactionReceiptResult = Result; + +pub struct MadaraProcess { + pub process: Child, + #[allow(dead_code)] + pub binary_path: PathBuf, +} + +impl MadaraProcess { + pub fn new(binary_path: PathBuf) -> Result { + let process = Command::new(&binary_path) + .arg("--name") + .arg("madara") + .arg("--base-path") + .arg("../madara-db33") + .arg("--rpc-port") + .arg(MADARA_PORT) + .arg("--rpc-cors") + .arg("*") + .arg("--rpc-external") + .arg("--devnet") + .arg("--chain-config-path") + .arg(MADARA_CONFIG_PATH) + .arg("--feeder-gateway-enable") + .arg("--gateway-enable") + .arg("--gateway-external") + .arg("--gateway-port") + .arg("8080") + .arg("--no-l1-sync") + .arg("--chain-config-override=block_time=5s,pending_block_update_time=1s") + .spawn()?; + + wait_for_port(MADARA_PORT.parse().unwrap(), 2, 10); + + Ok(Self { process, 
binary_path }) + } +} + +impl Drop for MadaraProcess { + fn drop(&mut self) { + if let Err(e) = self.process.kill() { + eprintln!("Failed to kill Madara process: {}", e); + } else { + Command::new("rm").arg("-rf").arg("../madara-db33").status().expect("Failed to delete the madara db"); + println!("Madara process killed successfully"); + } + } +} + +pub async fn prepare_starknet_client_test() -> anyhow::Result<(StarknetAccount, Felt, MadaraProcess)> { + let madara = MadaraProcess::new(PathBuf::from(MADARA_BINARY_PATH))?; + let account = starknet_account()?; + let deployed_appchain_contract_address = + deploy_contract(&account, APPCHAIN_CONTRACT_SIERRA, APPCHAIN_CONTRACT_CASM_HASH).await?; + Ok((account, deployed_appchain_contract_address, madara)) +} + +pub async fn prepare_starknet_client_messaging_test() -> anyhow::Result<(StarknetAccount, Felt, MadaraProcess)> { + let madara = MadaraProcess::new(PathBuf::from(MADARA_BINARY_PATH))?; + let account = starknet_account()?; + let deployed_appchain_contract_address = + deploy_contract(&account, MESSAGING_CONTRACT_SIERRA, MESSAGING_CONTRACT_CASM_HASH).await?; + Ok((account, deployed_appchain_contract_address, madara)) +} + +pub async fn send_state_update( + account: &StarknetAccount, + appchain_contract_address: Felt, + update: StateUpdate, +) -> anyhow::Result { + let call = account + .execute_v1(vec![Call { + to: appchain_contract_address, + selector: get_selector_from_name("update_state")?, + calldata: vec![Felt::from(update.block_number), update.global_root, update.block_hash], + }]) + .send() + .await?; + let receipt = get_transaction_receipt(account.provider(), call.transaction_hash).await?; + + let latest_block_number_recorded = account.provider().block_number().await?; + + match receipt.block.block_number() { + Some(block_number) => Ok(block_number), + None => Ok(latest_block_number_recorded + 1), + } +} + +pub async fn fire_messaging_event(account: &StarknetAccount, appchain_contract_address: Felt) -> 
anyhow::Result { + let call = account + .execute_v1(vec![Call { + to: appchain_contract_address, + selector: get_selector_from_name("fire_event")?, + calldata: vec![], + }]) + .send() + .await?; + let receipt = get_transaction_receipt(account.provider(), call.transaction_hash).await?; + + let latest_block_number_recorded = account.provider().block_number().await?; + + match receipt.block.block_number() { + Some(block_number) => Ok(block_number), + None => Ok(latest_block_number_recorded + 1), + } +} + +pub async fn cancel_messaging_event(account: &StarknetAccount, appchain_contract_address: Felt) -> anyhow::Result { + let call = account + .execute_v1(vec![Call { + to: appchain_contract_address, + selector: get_selector_from_name("set_is_canceled")?, + calldata: vec![Felt::ONE], + }]) + .send() + .await?; + let receipt = get_transaction_receipt(account.provider(), call.transaction_hash).await?; + + let latest_block_number_recorded = account.provider().block_number().await?; + + match receipt.block.block_number() { + Some(block_number) => Ok(block_number), + None => Ok(latest_block_number_recorded + 1), + } +} + +pub fn starknet_account() -> anyhow::Result { + let provider = + JsonRpcClient::new(HttpTransport::new(Url::parse(format!("http://127.0.0.1:{}", MADARA_PORT).as_str())?)); + let signer = LocalWallet::from(SigningKey::from_secret_scalar(Felt::from_str(DEPLOYER_PRIVATE_KEY)?)); + let mut account = SingleOwnerAccount::new( + provider, + signer, + Felt::from_str(DEPLOYER_ADDRESS)?, + // MADARA_DEVNET + Felt::from_str("0x4D41444152415F4445564E4554")?, + ExecutionEncoding::New, + ); + account.set_block_id(BlockId::Tag(BlockTag::Pending)); + Ok(account) +} + +pub async fn deploy_contract(account: &StarknetAccount, sierra: &[u8], casm_hash: &str) -> anyhow::Result { + let contract_artifact: SierraClass = serde_json::from_slice(sierra)?; + let flattened_class = contract_artifact.flatten()?; + let result = account.declare_v2(Arc::new(flattened_class), 
Felt::from_str(casm_hash)?).send().await?; + tokio::time::sleep(Duration::from_secs(5)).await; + let deployment = account + .execute_v3(vec![Call { + to: Felt::from_str(UDC_ADDRESS)?, + selector: get_selector_from_name("deployContract")?, + calldata: vec![result.class_hash, Felt::ZERO, Felt::ZERO, Felt::ZERO], + }]) + .send() + .await?; + let deployed_contract_address = + get_deployed_contract_address(deployment.transaction_hash, account.provider()).await?; + tokio::time::sleep(Duration::from_secs(5)).await; + Ok(deployed_contract_address) +} + +pub async fn get_deployed_contract_address( + txn_hash: Felt, + provider: &JsonRpcClient, +) -> anyhow::Result { + let deploy_tx_receipt = get_transaction_receipt(provider, txn_hash).await?; + let contract_address = assert_matches!( + deploy_tx_receipt, + TransactionReceiptWithBlockInfo { receipt: TransactionReceipt::Invoke(receipt), .. } => { + receipt.events.iter().find(|e| e.keys[0] == get_selector_from_name("ContractDeployed").unwrap()).unwrap().data[0] + } + ); + Ok(contract_address) +} + +pub async fn get_transaction_receipt( + rpc: &JsonRpcClient, + transaction_hash: Felt, +) -> TransactionReceiptResult { + // there is a delay between the transaction being available at the client + // and the pending tick of the block, hence sleeping for 500ms + assert_poll(|| async { rpc.get_transaction_receipt(transaction_hash).await.is_ok() }, 500, 20).await; + rpc.get_transaction_receipt(transaction_hash).await +} + +pub async fn assert_poll(f: F, polling_time_ms: u64, max_poll_count: u32) +where + F: Fn() -> Fut, + Fut: Future, +{ + for _poll_count in 0..max_poll_count { + if f().await { + return; + } + tokio::time::sleep(Duration::from_millis(polling_time_ms)).await; + } + panic!("Max poll count exceeded."); +} + +fn wait_for_port(port: u16, timeout_secs: u64, max_retries: u32) -> bool { + let mut attempts = 0; + println!("Waiting for port {} to be available...", port); + + while attempts < max_retries { + if check_port(port, 
timeout_secs) { + println!("Port {} is now available! (attempt {}/{})", port, attempts + 1, max_retries); + return true; + } + + attempts += 1; + if attempts < max_retries { + println!("Port {} not available, retrying... (attempt {}/{})", port, attempts, max_retries); + thread::sleep(Duration::from_secs(timeout_secs)); + } + } + + println!("Port {} not available after {} attempts", port, max_retries); + false +} + +fn check_port(port: u16, timeout_secs: u64) -> bool { + TcpStream::connect_timeout(&std::net::SocketAddr::from(([127, 0, 0, 1], port)), Duration::from_secs(timeout_secs)) + .is_ok() +} diff --git a/crates/madara/client/settlement_client/src/state_update.rs b/crates/madara/client/settlement_client/src/state_update.rs new file mode 100644 index 000000000..ece528854 --- /dev/null +++ b/crates/madara/client/settlement_client/src/state_update.rs @@ -0,0 +1,64 @@ +use std::sync::Arc; + +use crate::client::ClientTrait; +use crate::gas_price::L1BlockMetrics; +use crate::messaging::CommonMessagingEventData; +use anyhow::Context; +use futures::Stream; +use mc_db::MadaraBackend; +use mp_utils::service::ServiceContext; +use mp_utils::trim_hash; +use serde::Deserialize; +use starknet_types_core::felt::Felt; + +#[derive(Debug, Clone, Deserialize, PartialEq)] +pub struct StateUpdate { + pub block_number: u64, + pub global_root: Felt, + pub block_hash: Felt, +} + +pub fn update_l1( + backend: &MadaraBackend, + state_update: StateUpdate, + block_metrics: Arc, +) -> anyhow::Result<()> { + tracing::info!( + "🔄 Updated L1 head #{} ({}) with state root ({})", + state_update.block_number, + trim_hash(&state_update.block_hash), + trim_hash(&state_update.global_root) + ); + + block_metrics.l1_block_number.record(state_update.block_number, &[]); + + backend.write_last_confirmed_block(state_update.block_number).context("Setting l1 last confirmed block number")?; + tracing::debug!("update_l1: wrote last confirmed block number"); + + Ok(()) +} + +pub async fn state_update_worker( + 
backend: Arc, + settlement_client: Arc>>, + ctx: ServiceContext, + l1_block_metrics: Arc, +) -> anyhow::Result<()> +where + S: Stream>> + Send + 'static, +{ + // Clear L1 confirmed block at startup + backend.clear_last_confirmed_block().context("Clearing l1 last confirmed block number")?; + tracing::debug!("update_l1: cleared confirmed block number"); + + tracing::info!("🚀 Subscribed to L1 state verification"); + // This does not seem to play well with anvil + #[cfg(not(test))] + { + let initial_state = settlement_client.get_initial_state().await.context("Getting initial ethereum state")?; + update_l1(&backend, initial_state, l1_block_metrics.clone())?; + } + + settlement_client.listen_for_update_state_events(backend, ctx, l1_block_metrics.clone()).await?; + anyhow::Ok(()) +} diff --git a/crates/madara/client/settlement_client/src/sync.rs b/crates/madara/client/settlement_client/src/sync.rs new file mode 100644 index 000000000..3b2358bcc --- /dev/null +++ b/crates/madara/client/settlement_client/src/sync.rs @@ -0,0 +1,54 @@ +use crate::client::ClientTrait; +use crate::gas_price::{gas_price_worker, L1BlockMetrics}; +use crate::messaging::{sync, CommonMessagingEventData}; +use crate::state_update::state_update_worker; +use futures::Stream; +use mc_db::MadaraBackend; +use mc_mempool::{GasPriceProvider, Mempool}; +use mp_utils::service::ServiceContext; +use starknet_api::core::ChainId; +use std::sync::Arc; +use std::time::Duration; + +#[allow(clippy::too_many_arguments)] +pub async fn sync_worker( + backend: Arc, + settlement_client: Arc>>, + chain_id: ChainId, + l1_gas_provider: GasPriceProvider, + gas_price_sync_disabled: bool, + gas_price_poll_ms: Duration, + mempool: Arc, + ctx: ServiceContext, + l1_block_metrics: Arc, +) -> anyhow::Result<()> +where + S: Stream>> + Send + 'static, +{ + let mut join_set = tokio::task::JoinSet::new(); + + join_set.spawn(state_update_worker( + Arc::clone(&backend), + settlement_client.clone(), + ctx.clone(), + 
l1_block_metrics.clone(), + )); + + join_set.spawn(sync(settlement_client.clone(), Arc::clone(&backend), chain_id, mempool, ctx.clone())); + + if !gas_price_sync_disabled { + join_set.spawn(gas_price_worker( + settlement_client.clone(), + l1_gas_provider, + gas_price_poll_ms, + ctx.clone(), + l1_block_metrics, + )); + } + + while let Some(res) = join_set.join_next().await { + res??; + } + + Ok(()) +} diff --git a/crates/madara/client/eth/src/utils.rs b/crates/madara/client/settlement_client/src/utils.rs similarity index 87% rename from crates/madara/client/eth/src/utils.rs rename to crates/madara/client/settlement_client/src/utils.rs index 07f0b4b90..43fd0fa0e 100644 --- a/crates/madara/client/eth/src/utils.rs +++ b/crates/madara/client/settlement_client/src/utils.rs @@ -1,12 +1,10 @@ -use crate::client::StarknetCoreContract; -use crate::state_update::L1StateUpdate; +use crate::eth::StarknetCoreContract; +use crate::state_update::StateUpdate; use alloy::primitives::{I256, U256}; use anyhow::bail; use starknet_types_core::felt::Felt; -pub fn convert_log_state_update( - log_state_update: StarknetCoreContract::LogStateUpdate, -) -> anyhow::Result { +pub fn convert_log_state_update(log_state_update: StarknetCoreContract::LogStateUpdate) -> anyhow::Result { let block_number = if log_state_update.blockNumber >= I256::ZERO { log_state_update.blockNumber.low_u64() } else { @@ -16,7 +14,7 @@ pub fn convert_log_state_update( let global_root = u256_to_felt(log_state_update.globalRoot)?; let block_hash = u256_to_felt(log_state_update.blockHash)?; - Ok(L1StateUpdate { block_number, global_root, block_hash }) + Ok(StateUpdate { block_number, global_root, block_hash }) } pub fn u256_to_felt(u256: U256) -> anyhow::Result { @@ -44,7 +42,7 @@ mod eth_client_conversion_tests { let global_root: u128 = 456; let expected = - L1StateUpdate { block_number, block_hash: Felt::from(block_hash), global_root: Felt::from(global_root) }; + StateUpdate { block_number, block_hash: 
Felt::from(block_hash), global_root: Felt::from(global_root) }; let input = StarknetCoreContract::LogStateUpdate { blockNumber: I256::from_dec_str(block_number.to_string().as_str()).unwrap(), diff --git a/crates/madara/node/Cargo.toml b/crates/madara/node/Cargo.toml index 4b1e7c4bf..51d98dacb 100644 --- a/crates/madara/node/Cargo.toml +++ b/crates/madara/node/Cargo.toml @@ -26,11 +26,11 @@ mc-block-import = { workspace = true } mc-block-production = { workspace = true } mc-db = { workspace = true } mc-devnet = { workspace = true } -mc-eth = { workspace = true } mc-gateway-client = { workspace = true } mc-gateway-server = { workspace = true } mc-mempool = { workspace = true } mc-rpc = { workspace = true } +mc-settlement-client = { workspace = true } mc-sync = { workspace = true } mc-telemetry = { workspace = true } mp-block = { workspace = true } @@ -58,6 +58,8 @@ reqwest.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true serde_yaml.workspace = true +starknet-core.workspace = true +starknet-providers.workspace = true thiserror.workspace = true tokio.workspace = true tower.workspace = true diff --git a/crates/madara/node/src/cli/chain_config_overrides.rs b/crates/madara/node/src/cli/chain_config_overrides.rs index 61808505e..8abe4a958 100644 --- a/crates/madara/node/src/cli/chain_config_overrides.rs +++ b/crates/madara/node/src/cli/chain_config_overrides.rs @@ -8,7 +8,6 @@ use serde::{Deserialize, Serialize}; use serde_yaml::Value; use starknet_api::core::{ChainId, ContractAddress}; -use mp_block::H160; use mp_chain_config::{ deserialize_bouncer_config, deserialize_starknet_version, serialize_bouncer_config, serialize_starknet_version, ChainConfig, StarknetVersion, @@ -101,8 +100,8 @@ pub struct ChainConfigOverridesInner { #[serde(deserialize_with = "deserialize_bouncer_config", serialize_with = "serialize_bouncer_config")] pub bouncer_config: BouncerConfig, pub sequencer_address: ContractAddress, - pub 
eth_core_contract_address: H160, - pub eth_gps_statement_verifier: H160, + pub eth_core_contract_address: String, + pub eth_gps_statement_verifier: String, #[serde(default)] #[serde(skip_serializing)] #[serde(deserialize_with = "deserialize_private_key")] diff --git a/crates/madara/node/src/cli/l1.rs b/crates/madara/node/src/cli/l1.rs index 2d28a8b25..5de384644 100644 --- a/crates/madara/node/src/cli/l1.rs +++ b/crates/madara/node/src/cli/l1.rs @@ -1,9 +1,37 @@ +use alloy::primitives::private::derive_more::FromStr; +use std::fmt; use std::time::Duration; - use url::Url; use mp_utils::parsers::{parse_duration, parse_url}; +#[derive(Clone, Debug)] +pub enum MadaraSettlementLayer { + Eth, + Starknet, +} + +impl FromStr for MadaraSettlementLayer { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_uppercase().as_str() { + "ETH" => Ok(MadaraSettlementLayer::Eth), + "STARKNET" => Ok(MadaraSettlementLayer::Starknet), + _ => Err(format!("Invalid settlement layer: {}", s)), + } + } +} + +impl fmt::Display for MadaraSettlementLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MadaraSettlementLayer::Eth => write!(f, "ETH"), + MadaraSettlementLayer::Starknet => write!(f, "STARKNET"), + } + } +} + #[derive(Clone, Debug, clap::Args)] pub struct L1SyncParams { /// Disable L1 sync. 
@@ -46,4 +74,11 @@ pub struct L1SyncParams { value_parser = parse_duration, )] pub gas_price_poll: Duration, + + #[clap( + env = "MADARA_SETTLEMENT_LAYER", + long, + default_value_t = MadaraSettlementLayer::Eth, + )] + pub settlement_layer: MadaraSettlementLayer, } diff --git a/crates/madara/node/src/main.rs b/crates/madara/node/src/main.rs index d71b89073..da936e1ce 100644 --- a/crates/madara/node/src/main.rs +++ b/crates/madara/node/src/main.rs @@ -5,6 +5,8 @@ mod cli; mod service; mod util; +use crate::cli::l1::MadaraSettlementLayer; +use crate::service::L1SyncConfig; use anyhow::{bail, Context}; use clap::Parser; use cli::RunCmd; @@ -15,6 +17,11 @@ use mc_db::{DatabaseService, TrieLogConfig}; use mc_gateway_client::GatewayProvider; use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; use mc_rpc::providers::{AddTransactionProvider, ForwardToProvider, MempoolAddTxProvider}; +use mc_settlement_client::eth::event::EthereumEventStream; +use mc_settlement_client::eth::EthereumClientConfig; +use mc_settlement_client::gas_price::L1BlockMetrics; +use mc_settlement_client::starknet::event::StarknetEventStream; +use mc_settlement_client::starknet::StarknetClientConfig; use mc_sync::fetch::fetchers::WarpUpdateConfig; use mc_telemetry::{SysInfo, TelemetryService}; use mp_oracle::pragma::PragmaOracleBuilder; @@ -163,18 +170,39 @@ async fn main() -> anyhow::Result<()> { mempool.load_txs_from_db().context("Loading mempool transactions")?; let mempool = Arc::new(mempool); - let service_l1_sync = L1SyncService::new( - &run_cmd.l1_sync_params, - &service_db, - l1_gas_setter, - chain_config.chain_id.clone(), - chain_config.eth_core_contract_address, - run_cmd.is_sequencer(), - run_cmd.is_devnet(), - Arc::clone(&mempool), - ) - .await - .context("Initializing the l1 sync service")?; + let l1_block_metrics = L1BlockMetrics::register().context("Initializing L1 Block Metrics")?; + let service_l1_sync = match &run_cmd.l1_sync_params.settlement_layer { + 
MadaraSettlementLayer::Eth => L1SyncService::::create( + &run_cmd.l1_sync_params, + L1SyncConfig { + db: &service_db, + l1_gas_provider: l1_gas_setter, + chain_id: chain_config.chain_id.clone(), + l1_core_address: chain_config.eth_core_contract_address.clone(), + authority: run_cmd.is_sequencer(), + devnet: run_cmd.is_devnet(), + mempool: Arc::clone(&mempool), + l1_block_metrics: Arc::new(l1_block_metrics), + }, + ) + .await + .context("Initializing the l1 sync service")?, + MadaraSettlementLayer::Starknet => L1SyncService::::create( + &run_cmd.l1_sync_params, + L1SyncConfig { + db: &service_db, + l1_gas_provider: l1_gas_setter, + chain_id: chain_config.chain_id.clone(), + l1_core_address: chain_config.eth_core_contract_address.clone(), + authority: run_cmd.is_sequencer(), + devnet: run_cmd.is_devnet(), + mempool: Arc::clone(&mempool), + l1_block_metrics: Arc::new(l1_block_metrics), + }, + ) + .await + .context("Initializing the l1 sync service")?, + }; // L2 Sync diff --git a/crates/madara/node/src/service/l1.rs b/crates/madara/node/src/service/l1.rs index e305d741c..9a449771c 100644 --- a/crates/madara/node/src/service/l1.rs +++ b/crates/madara/node/src/service/l1.rs @@ -1,112 +1,198 @@ -use crate::cli::l1::L1SyncParams; +use crate::cli::l1::{L1SyncParams, MadaraSettlementLayer}; use alloy::primitives::Address; use anyhow::Context; +use futures::Stream; use mc_db::{DatabaseService, MadaraBackend}; -use mc_eth::client::{EthereumClient, L1BlockMetrics}; use mc_mempool::{GasPriceProvider, Mempool}; -use mp_block::H160; +use mc_settlement_client::client::ClientTrait; +use mc_settlement_client::eth::event::EthereumEventStream; +use mc_settlement_client::eth::{EthereumClient, EthereumClientConfig}; +use mc_settlement_client::gas_price::L1BlockMetrics; +use mc_settlement_client::messaging::CommonMessagingEventData; +use mc_settlement_client::starknet::event::StarknetEventStream; +use mc_settlement_client::starknet::{StarknetClient, StarknetClientConfig}; use 
mp_utils::service::{MadaraServiceId, PowerOfTwo, Service, ServiceId, ServiceRunner}; use starknet_api::core::ChainId; +use starknet_core::types::Felt; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -#[derive(Clone)] -pub struct L1SyncService { +// Configuration struct to group related parameters +pub struct L1SyncConfig<'a> { + pub db: &'a DatabaseService, + pub l1_gas_provider: GasPriceProvider, + pub chain_id: ChainId, + pub l1_core_address: String, + pub authority: bool, + pub devnet: bool, + pub mempool: Arc, + pub l1_block_metrics: Arc, +} + +impl<'a> L1SyncConfig<'a> { + pub fn should_enable_sync(&self, config: &L1SyncParams) -> bool { + !config.l1_sync_disabled && (config.l1_endpoint.is_some() || !self.devnet) + } +} + +pub struct L1SyncService +where + C: Clone, + S: Send + Stream>>, +{ db_backend: Arc, - eth_client: Option>, + settlement_client: Option>>>, l1_gas_provider: GasPriceProvider, chain_id: ChainId, gas_price_sync_disabled: bool, gas_price_poll: Duration, mempool: Arc, + l1_block_metrics: Arc, } -impl L1SyncService { - #[allow(clippy::too_many_arguments)] - pub async fn new( - config: &L1SyncParams, - db: &DatabaseService, - l1_gas_provider: GasPriceProvider, - chain_id: ChainId, - l1_core_address: H160, - authority: bool, - devnet: bool, - mempool: Arc, - ) -> anyhow::Result { - let eth_client = if !config.l1_sync_disabled && (config.l1_endpoint.is_some() || !devnet) { +pub type EthereumSyncService = L1SyncService; +pub type StarknetSyncService = L1SyncService; + +// Implementation for Ethereum +impl EthereumSyncService { + pub async fn new(config: &L1SyncParams, sync_config: L1SyncConfig<'_>) -> anyhow::Result { + let settlement_client = { if let Some(l1_rpc_url) = &config.l1_endpoint { - let core_address = Address::from_slice(l1_core_address.as_bytes()); - let l1_block_metrics = L1BlockMetrics::register().expect("Registering metrics"); - let client = EthereumClient::new(l1_rpc_url.clone(), core_address, l1_block_metrics) 
- .await - .context("Creating ethereum client")?; + let core_address = Address::from_str(sync_config.l1_core_address.as_str())?; + let client = EthereumClient::new(EthereumClientConfig { + url: l1_rpc_url.clone(), + l1_core_address: core_address, + }) + .await + .context("Creating ethereum client")?; - Some(Arc::new(client)) + let client_converted: Box< + dyn ClientTrait, + > = Box::new(client); + Some(Arc::new(client_converted)) } else { anyhow::bail!( - "No Ethereum endpoint provided. You need to provide one using --l1-endpoint in order to verify the synced state or disable the l1 watcher using --no-l1-sync." + "No Ethereum endpoint provided. Use --l1-endpoint or disable with --no-l1-sync." ); } - } else { - None }; - // Note: gas price should be synced in case the madara is running in sequencer mode, - // we haven't set any fix price for the gas, hence gas price should be none - let gas_price_sync_enabled = - authority && !devnet && (config.gas_price.is_none() || config.blob_gas_price.is_none()); + Self::create_service(config, sync_config, settlement_client).await + } +} + +// Implementation for Starknet +impl StarknetSyncService { + pub async fn new(config: &L1SyncParams, sync_config: L1SyncConfig<'_>) -> anyhow::Result { + let settlement_client = { + if let Some(l1_rpc_url) = &config.l1_endpoint { + let core_address = Felt::from_str(sync_config.l1_core_address.as_str())?; + let client = StarknetClient::new(StarknetClientConfig { + url: l1_rpc_url.clone(), + l2_contract_address: core_address, + }) + .await + .context("Creating starknet client")?; + + let client_converted: Box< + dyn ClientTrait, + > = Box::new(client); + Some(Arc::new(client_converted)) + } else { + anyhow::bail!( + "No Starknet endpoint provided. Use --l1-endpoint or disable with --no-l1-sync." 
+ ); + } + }; + + Self::create_service(config, sync_config, settlement_client).await + } +} + +// Shared implementation for both services +impl L1SyncService +where + C: Clone + 'static, + S: Send + Stream>> + 'static, +{ + async fn create_service( + config: &L1SyncParams, + sync_config: L1SyncConfig<'_>, + settlement_client: Option>>>, + ) -> anyhow::Result { + let gas_price_sync_enabled = sync_config.authority + && !sync_config.devnet + && (config.gas_price.is_none() || config.blob_gas_price.is_none()); let gas_price_poll = config.gas_price_poll; if gas_price_sync_enabled { - let eth_client = eth_client - .clone() - .context("L1 gas prices require the ethereum service to be enabled. Either disable gas prices syncing using `--gas-price 0`, or disable L1 sync using the `--no-l1-sync` argument.")?; - // running at-least once before the block production service + let settlement_client = + settlement_client.clone().context("L1 gas prices require the service to be enabled...")?; tracing::info!("⏳ Getting initial L1 gas prices"); - mc_eth::l1_gas_price::gas_price_worker_once(ð_client, &l1_gas_provider, gas_price_poll) - .await - .context("Getting initial ethereum gas prices")?; + mc_settlement_client::gas_price::gas_price_worker_once( + settlement_client, + &sync_config.l1_gas_provider, + gas_price_poll, + sync_config.l1_block_metrics.clone(), + ) + .await + .context("Getting initial gas prices")?; } Ok(Self { - db_backend: Arc::clone(db.backend()), - eth_client, - l1_gas_provider, - chain_id, + db_backend: Arc::clone(sync_config.db.backend()), + settlement_client, + l1_gas_provider: sync_config.l1_gas_provider, + chain_id: sync_config.chain_id, gas_price_sync_disabled: !gas_price_sync_enabled, gas_price_poll, - mempool, + mempool: sync_config.mempool, + l1_block_metrics: sync_config.l1_block_metrics, }) } + + // Factory method to create the appropriate service + pub async fn create(config: &L1SyncParams, sync_config: L1SyncConfig<'_>) -> anyhow::Result> { + if 
sync_config.should_enable_sync(config) { + match config.settlement_layer { + MadaraSettlementLayer::Eth => Ok(Box::new(EthereumSyncService::new(config, sync_config).await?)), + MadaraSettlementLayer::Starknet => Ok(Box::new(StarknetSyncService::new(config, sync_config).await?)), + } + } else { + Err(anyhow::anyhow!("❗ L1 Sync is disabled")) + } + } } #[async_trait::async_trait] -impl Service for L1SyncService { +impl Service for L1SyncService +where + C: Clone, + S: Send + Stream>>, +{ async fn start<'a>(&mut self, runner: ServiceRunner<'a>) -> anyhow::Result<()> { - let L1SyncService { - db_backend, - l1_gas_provider, - chain_id, - gas_price_sync_disabled, - gas_price_poll, - mempool, - .. - } = self.clone(); - - if let Some(eth_client) = &self.eth_client { - // enabled + if let Some(settlement_client) = &self.settlement_client { + let db_backend = Arc::clone(&self.db_backend); + let settlement_client = Arc::clone(settlement_client); + let chain_id = self.chain_id.clone(); + let l1_gas_provider = self.l1_gas_provider.clone(); + let gas_price_sync_disabled = self.gas_price_sync_disabled; + let gas_price_poll = self.gas_price_poll; + let mempool = Arc::clone(&self.mempool); + let l1_block_metrics = self.l1_block_metrics.clone(); - let eth_client = Arc::clone(eth_client); runner.service_loop(move |ctx| { - mc_eth::sync::l1_sync_worker( + mc_settlement_client::sync::sync_worker( db_backend, - eth_client, + settlement_client, chain_id, l1_gas_provider, gas_price_sync_disabled, gas_price_poll, mempool, ctx, + l1_block_metrics, ) }); } else { @@ -117,7 +203,11 @@ impl Service for L1SyncService { } } -impl ServiceId for L1SyncService { +impl ServiceId for L1SyncService +where + C: Clone, + S: Send + Stream>>, +{ #[inline(always)] fn svc_id(&self) -> PowerOfTwo { MadaraServiceId::L1Sync.svc_id() diff --git a/crates/madara/node/src/service/mod.rs b/crates/madara/node/src/service/mod.rs index 84a36057b..bd66212c0 100644 --- a/crates/madara/node/src/service/mod.rs +++ 
b/crates/madara/node/src/service/mod.rs @@ -6,6 +6,7 @@ mod rpc; pub use block_production::BlockProductionService; pub use gateway::GatewayService; +pub use l1::L1SyncConfig; pub use l1::L1SyncService; pub use l2::L2SyncService; pub use rpc::RpcService; diff --git a/crates/madara/primitives/chain_config/src/chain_config.rs b/crates/madara/primitives/chain_config/src/chain_config.rs index 4d37e1d3d..2b20ce30b 100644 --- a/crates/madara/primitives/chain_config/src/chain_config.rs +++ b/crates/madara/primitives/chain_config/src/chain_config.rs @@ -18,7 +18,6 @@ use blockifier::bouncer::{BouncerWeights, BuiltinCount}; use blockifier::{bouncer::BouncerConfig, versioned_constants::VersionedConstants}; use lazy_static::__Deref; use mp_utils::crypto::ZeroingPrivateKey; -use primitive_types::H160; use serde::de::{MapAccess, Visitor}; use serde::{Deserialize, Deserializer, Serialize}; use starknet_api::core::{ChainId, ContractAddress, PatriciaKey}; @@ -114,11 +113,11 @@ pub struct ChainConfig { pub sequencer_address: ContractAddress, /// The Starknet core contract address for the L1 watcher. - pub eth_core_contract_address: H160, + pub eth_core_contract_address: String, /// The Starknet SHARP verifier La address. Check out the [docs](https://docs.starknet.io/architecture-and-concepts/solidity-verifier/) /// for more information - pub eth_gps_statement_verifier: H160, + pub eth_gps_statement_verifier: String, /// Private key used by the node to sign blocks provided through the /// feeder gateway. 
This serves as a proof of origin and in the future @@ -551,10 +550,7 @@ mod tests { ) .unwrap() ); - assert_eq!( - chain_config.eth_core_contract_address, - H160::from_str("0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4").unwrap() - ); + assert_eq!(chain_config.eth_core_contract_address, "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4"); } #[rstest] diff --git a/tests/js_tests/basic.test.ts b/tests/js_tests/basic.test.ts index 4149c32b2..771cbc420 100644 --- a/tests/js_tests/basic.test.ts +++ b/tests/js_tests/basic.test.ts @@ -87,7 +87,7 @@ async function declareContract({ provider, account }: TestContext) { expect(response.sierra_program).toEqual(sierra.sierra_program); expect(response.abi).toEqual(sierra.abi); expect(response.contract_class_version).toEqual( - sierra.contract_class_version, + sierra.contract_class_version ); expect(response.entry_points_by_type).toEqual(sierra.entry_points_by_type); } else { @@ -119,7 +119,7 @@ async function deployContract({ provider, account }: TestContext) { // Retrieve the class hash for the deployed contract let response = await provider.getClassHashAt( - deployResult.contract_address[0], + deployResult.contract_address[0] ); // Verify that the retrieved class hash matches the computed class hash @@ -139,7 +139,7 @@ async function deployContract({ provider, account }: TestContext) { async function deployAccount({ provider, account }: TestContext) { // Read the Sierra contract class for the account const sierra = readContractSierraInArtifacts( - "openzeppelin_AccountUpgradeable", + "openzeppelin_AccountUpgradeable" ); // Compute the class hash of the account contract @@ -159,7 +159,7 @@ async function deployAccount({ provider, account }: TestContext) { publicKey, classHash, calldata, - 0, + 0 ); // Create a new Account instance with the calculated address and private key @@ -174,7 +174,7 @@ async function deployAccount({ provider, account }: TestContext) { }, { maxFee: 0, - }, + } ); // Wait for the transaction to be confirmed and 
get the receipt @@ -203,14 +203,14 @@ async function transferFunds({ provider, account }: TestContext) { // Read the ERC20 contract class const erc20ContractData = readContractSierraInArtifacts( - "openzeppelin_ERC20Upgradeable", + "openzeppelin_ERC20Upgradeable" ); // Create an instance of the ERC20 contract const erc20Instance = new Contract( erc20ContractData.abi, ERC20_CONTRACT_ADDRESS, - provider, + provider ); // Connect the account to the ERC20 contract instance @@ -218,10 +218,11 @@ async function transferFunds({ provider, account }: TestContext) { // Get the initial balances of sender and receiver const preTransactSenderBalance = await erc20Instance.balance_of( - SIGNER_CONTRACT_ADDRESS, + SIGNER_CONTRACT_ADDRESS + ); + const preTransactReceiverBalance = await erc20Instance.balance_of( + RECEIVER_ADDRESS ); - const preTransactReceiverBalance = - await erc20Instance.balance_of(RECEIVER_ADDRESS); // Execute the transfer // Note: We are setting maxFee to zero here @@ -236,7 +237,7 @@ async function transferFunds({ provider, account }: TestContext) { }, { maxFee: 0, - }, + } ); // Wait for the transfer transaction to be confirmed @@ -244,19 +245,20 @@ async function transferFunds({ provider, account }: TestContext) { // Get the final balances of sender and receiver const postTransactSenderBalance = await erc20Instance.balance_of( - SIGNER_CONTRACT_ADDRESS, + SIGNER_CONTRACT_ADDRESS + ); + const postTransactReceiverBalance = await erc20Instance.balance_of( + RECEIVER_ADDRESS ); - const postTransactReceiverBalance = - await erc20Instance.balance_of(RECEIVER_ADDRESS); // Verify that the balances have been updated correctly // Note: In real world case, the sender balance would be // preTransactionSenderBalance - TRANSFER_AMOUNT - Fees // but we had fees set to zero while executing transaction expect(postTransactSenderBalance).toBe( - preTransactSenderBalance - TRANSFER_AMOUNT, + preTransactSenderBalance - TRANSFER_AMOUNT ); 
expect(postTransactReceiverBalance).toBe( - preTransactReceiverBalance + TRANSFER_AMOUNT, + preTransactReceiverBalance + TRANSFER_AMOUNT ); } diff --git a/tests/js_tests/utils.ts b/tests/js_tests/utils.ts index f75eded24..e2462f96e 100644 --- a/tests/js_tests/utils.ts +++ b/tests/js_tests/utils.ts @@ -8,10 +8,10 @@ export const readContractCasm = (name: string): CompiledSierraCasm => .readFileSync( path.resolve( __dirname, - `../../cairo/target/dev/${name}.compiled_contract_class.json`, - ), + `../../cairo/target/dev/${name}.compiled_contract_class.json` + ) ) - .toString("ascii"), + .toString("ascii") ); export const readContractSierra = (name: string): CompiledSierra => @@ -20,10 +20,10 @@ export const readContractSierra = (name: string): CompiledSierra => .readFileSync( path.resolve( __dirname, - `../../cairo/target/dev/${name}.contract_class.json`, - ), + `../../cairo/target/dev/${name}.contract_class.json` + ) ) - .toString("ascii"), + .toString("ascii") ); export const readContractSierraInArtifacts = (name: string): CompiledSierra => @@ -32,8 +32,8 @@ export const readContractSierraInArtifacts = (name: string): CompiledSierra => .readFileSync( path.resolve( __dirname, - `../../cairo-artifacts/${name}.contract_class.json`, - ), + `../../cairo-artifacts/${name}.contract_class.json` + ) ) - .toString("ascii"), + .toString("ascii") );