diff --git a/Cargo.lock b/Cargo.lock index 15d1ebf3..bee7a4a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7656,6 +7656,7 @@ dependencies = [ "reth-primitives", "reth-revm", "reth-storage-errors", + "risc0-ethereum-trie", "rkyv", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index fb8cdfa7..e1e7417c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,7 +36,7 @@ features = ["orphan", "rkyv", "rlp_serialize", "serde"] # External [workspace.dependencies] # Alloy -alloy = { version = "0.4.2", features = ["full"] } +alloy = { version = "0.4.2", features = ["full", "serde"] } alloy-chains = "0.1.38" alloy-consensus = "0.4.2" alloy-genesis = "0.4.2" diff --git a/bin/ethereum/Cargo.toml b/bin/ethereum/Cargo.toml index f6559992..fb29e3a4 100644 --- a/bin/ethereum/Cargo.toml +++ b/bin/ethereum/Cargo.toml @@ -23,3 +23,4 @@ disable-dev-mode = ["zeth/disable-dev-mode"] cuda = ["zeth/cuda"] metal = ["zeth/metal"] prove = ["zeth/prove"] +erigon-range-queries = ["zeth-preflight-ethereum/erigon-range-queries"] diff --git a/bin/ethereum/data/ethereum/1-12965000.json.gz b/bin/ethereum/data/ethereum/1-12965000.json.gz index d631826c..290aa734 100644 Binary files a/bin/ethereum/data/ethereum/1-12965000.json.gz and b/bin/ethereum/data/ethereum/1-12965000.json.gz differ diff --git a/bin/optimism/Cargo.toml b/bin/optimism/Cargo.toml index 73f95caa..316f1c11 100644 --- a/bin/optimism/Cargo.toml +++ b/bin/optimism/Cargo.toml @@ -24,3 +24,4 @@ disable-dev-mode = ["zeth/disable-dev-mode"] cuda = ["zeth/cuda"] metal = ["zeth/metal"] prove = ["zeth/prove"] +erigon-range-queries = ["zeth-preflight-optimism/erigon-range-queries"] diff --git a/crates/core/src/mpt.rs b/crates/core/src/mpt.rs index eb0d0c23..864dd697 100644 --- a/crates/core/src/mpt.rs +++ b/crates/core/src/mpt.rs @@ -1,7 +1,6 @@ -use alloy_primitives::map::B256Set; use alloy_primitives::B256; use alloy_rlp::{Decodable, Encodable}; -use risc0_ethereum_trie::{orphan, CachedTrie}; +use risc0_ethereum_trie::{orphan, CachedTrie, Nibbles}; use serde::{Deserialize, Serialize}; use std::borrow::Borrow; use std::marker::PhantomData; @@ -49,29 +48,18 @@ impl MptNode { } /// Tries to resolve the potential removal orphan corresponding to `key` from the given - /// post-removal proof. If the orphan cannot be resolved from the proof alone, the `key` - /// corresponding to the unresolved path is added to `unresolvable`. - pub fn resolve_orphan( + /// post-removal proof. If the orphan cannot be resolved from the proof alone, the + /// prefix of the missing MPT key is returned. 
+    pub fn resolve_orphan<K: AsRef<[u8]>, N: AsRef<[u8]>>(
         &mut self,
         key: K,
         post_state_proof: impl IntoIterator<Item = N>,
-        unresolvable: &mut B256Set,
-    ) -> anyhow::Result<()>
-    where
-        K: AsRef<[u8]>,
-        N: AsRef<[u8]>,
-    {
-        match self.inner.resolve_orphan(key, post_state_proof) {
-            Ok(_) => {}
-            Err(orphan::Error::Unresolvable(prefix)) => {
-                // convert the unresolvable prefix nibbles into a B256 key with zero padding
-                let key = B256::right_padding_from(&prefix.pack());
-                unresolvable.insert(key);
-            }
-            Err(err) => return Err(err.into()),
-        };
-
-        Ok(())
+    ) -> anyhow::Result<Option<Nibbles>> {
+        match self.inner.resolve_orphan(&key, post_state_proof) {
+            Ok(_) => Ok(None),
+            Err(orphan::Error::Unresolvable(prefix)) => Ok(Some(prefix)),
+            Err(err) => Err(err.into()),
+        }
     }
 
     #[inline]
diff --git a/crates/preflight-ethereum/Cargo.toml b/crates/preflight-ethereum/Cargo.toml
index 599a67dc..aa33ca7d 100644
--- a/crates/preflight-ethereum/Cargo.toml
+++ b/crates/preflight-ethereum/Cargo.toml
@@ -16,3 +16,6 @@ workspace = true
 alloy.workspace = true
 reth-chainspec.workspace = true
 reth-primitives.workspace = true
+
+[features]
+erigon-range-queries = ["zeth-preflight/erigon-range-queries"]
diff --git a/crates/preflight-optimism/Cargo.toml b/crates/preflight-optimism/Cargo.toml
index 39866efa..403e666c 100644
--- a/crates/preflight-optimism/Cargo.toml
+++ b/crates/preflight-optimism/Cargo.toml
@@ -29,4 +29,7 @@
 reth-primitives = { workspace = true, features = ["optimism"] }
 reth-revm = { workspace = true, features = ["optimism"] }
 reth-storage-errors.workspace = true
-serde_json.workspace = true
\ No newline at end of file
+serde_json.workspace = true
+
+[features]
+erigon-range-queries = ["zeth-preflight/erigon-range-queries"]
diff --git a/crates/preflight/Cargo.toml b/crates/preflight/Cargo.toml
index 503f2089..082b1828 100644
--- a/crates/preflight/Cargo.toml
+++ b/crates/preflight/Cargo.toml
@@ -15,6 +15,7 @@ itertools.workspace = true
 k256.workspace = true
 log.workspace = true
 pot.workspace = true
+risc0-ethereum-trie.workspace = true
 rkyv.workspace = true
 serde.workspace = true
 serde_json.workspace = true
@@ -24,3 +25,6 @@ reth-chainspec.workspace = true
 reth-revm.workspace = true
 reth-primitives.workspace = true
 reth-storage-errors.workspace = true
+
+[features]
+erigon-range-queries = []
diff --git a/crates/preflight/src/client.rs b/crates/preflight/src/client.rs
index 599c9f25..7db5f0d1 100644
--- a/crates/preflight/src/client.rs
+++ b/crates/preflight/src/client.rs
@@ -17,13 +17,15 @@ use crate::driver::PreflightDriver;
 use crate::provider::db::ProviderDB;
 use crate::provider::query::{BlockQuery, UncleQuery};
 use crate::provider::{new_provider, Provider};
+use alloy::hex;
 use alloy::network::Network;
-use alloy::primitives::map::{AddressHashMap, B256Set, HashSet};
+use alloy::primitives::map::{AddressHashMap, HashSet};
 use alloy::primitives::{keccak256, Bytes, U256};
 use alloy::rpc::types::EIP1186StorageProof;
 use anyhow::Context;
 use itertools::Itertools;
 use log::{debug, info, warn};
+use risc0_ethereum_trie::Nibbles;
 use std::cell::RefCell;
 use std::iter::zip;
 use std::path::PathBuf;
@@ -258,8 +260,10 @@ where
 
         info!("Extending tries from post-state proofs...");
 
-        let mut unresolvable_state_keys = B256Set::default();
+        let mut resolved_state_keys = 0usize;
+        let mut resolved_storage_keys = 0usize;
+
+        let mut unresolved_state_keys: HashSet<Nibbles> = HashSet::default();
         for (address, account_proof) in latest_proofs {
             let db_key = keccak256(address);
 
@@ -272,15 +276,18 @@
             }
             // otherwise, prepare trie for the removal of that
key - state_trie - .resolve_orphan( - db_key, - account_proof.account_proof, - &mut unresolvable_state_keys, - ) - .with_context(|| format!("failed to resolve orphan for {}", address))?; + if let Some(unresolved_prefix) = state_trie + .resolve_orphan(db_key, account_proof.account_proof) + .with_context(|| format!("failed to resolve orphan for {}", address))? + { + debug!( + "unresolved key in state trie: {}", + hex::encode(unresolved_prefix.pack()) + ); + unresolved_state_keys.insert(unresolved_prefix); + } - let mut unresolvable_storage_keys = B256Set::default(); + let mut unresolved_storage_keys: HashSet = HashSet::default(); let storage_trie = &mut storage_tries.get_mut(&address).unwrap().storage_trie; for EIP1186StorageProof { key, proof, .. } in account_proof.storage_proof { @@ -290,35 +297,52 @@ where storage_trie.hydrate_from_rlp(proof)?; } else { // otherwise, prepare trie for the removal of that key - storage_trie - .resolve_orphan(db_key, proof, &mut unresolvable_storage_keys) + if let Some(unresolved_prefix) = storage_trie + .resolve_orphan(db_key, proof) .with_context(|| { format!("failed to resolve orphan for {}@{}", key.0, address) - })?; + })? + { + debug!( + "unresolved key in storage trie for {}: {}", + address, + hex::encode(unresolved_prefix.pack()) + ); + unresolved_storage_keys.insert(unresolved_prefix); + } } } // if orphans could not be resolved, use a range query to get that missing info - if !unresolvable_storage_keys.is_empty() { + if !unresolved_storage_keys.is_empty() { + resolved_storage_keys += unresolved_storage_keys.len(); let proof = preflight_db - .get_next_slot_proofs(block_count, address, unresolvable_storage_keys) - .with_context(|| format!("failed to get next slot for {}", address))?; + .get_storage_proofs_with_prefix(address, unresolved_storage_keys) + .with_context(|| { + format!("failed to get proof for unresolved slots for {}", address) + })?; storage_trie .hydrate_from_rlp(proof.storage_proof.iter().flat_map(|p| &p.proof)) .with_context(|| format!("invalid storage proof for {}", address))?; } } - for state_key in unresolvable_state_keys { + resolved_state_keys += unresolved_state_keys.len(); + for prefix in unresolved_state_keys { let proof = preflight_db - .get_next_account_proof(block_count, state_key) - .context("failed to get next account")?; + .get_account_proof_with_prefix(prefix) + .context("failed to get proof for unresolved account")?; state_trie .hydrate_from_rlp(proof.account_proof) .with_context(|| format!("invalid account proof for {}", proof.address))?; } - info!("Saving provider cache ..."); + info!( + "Resolved {} addresses and {} storage keys", + resolved_state_keys, resolved_storage_keys + ); + + info!("Saving provider cache..."); preflight_db.save_provider()?; // Increment block number counter diff --git a/crates/preflight/src/db.rs b/crates/preflight/src/db.rs index 7690ae96..4af955dc 100644 --- a/crates/preflight/src/db.rs +++ b/crates/preflight/src/db.rs @@ -15,17 +15,18 @@ use crate::driver::PreflightDriver; use crate::provider::db::ProviderDB; use crate::provider::get_proofs; -use crate::provider::query::{AccountRangeQuery, BlockQuery, ProofQuery, StorageRangeQuery}; +use crate::provider::query::{BlockQuery, NextAccountQuery, NextSlotQuery, ProofQuery}; use alloy::network::Network; use alloy::primitives::map::HashMap; -use alloy::primitives::{Address, B256, U256}; +use alloy::primitives::{keccak256, Address, B256, U256}; use alloy::rpc::types::EIP1186AccountProofResponse; -use anyhow::Context; -use log::{debug, error}; +use 
anyhow::{ensure, Context}; +use log::error; use reth_primitives::revm_primitives::{Account, AccountInfo, Bytecode}; use reth_revm::db::states::StateChangeset; use reth_revm::db::CacheDB; use reth_revm::{Database, DatabaseCommit, DatabaseRef}; +use risc0_ethereum_trie::Nibbles; use std::cell::{Ref, RefCell}; use std::collections::BTreeSet; use std::marker::PhantomData; @@ -252,25 +253,30 @@ impl> PreflightDB { Ok(headers) } - /// Fetches the EIP-1186 proof for the next account after a given key. + /// Retrieves an EIP-1186 account proof for an account whose hashed address starts with the + /// specified prefix. /// - /// This method retrieves an [EIP1186AccountProofResponse] for the account whose address, when - /// hashed, lexicographically follows the provided `start` key. The proof is generated for the - /// block `block_count` after the currently configured block in the provider. - pub fn get_next_account_proof( + /// The method finds the first account whose address when hashed with keccak256 starts with the + /// given prefix. Then, it retrieves and returns an [EIP1186AccountProofResponse] of that + /// account. + pub fn get_account_proof_with_prefix( &mut self, - block_count: u64, - start: B256, + prefix: Nibbles, ) -> anyhow::Result { let initial_db = self.inner.db.db.borrow_mut(); let provider_db = initial_db.db.borrow_db(); let mut provider = provider_db.provider.borrow_mut(); - let block_no = initial_db.db.borrow_db().block_no + block_count - 1; + let block_no = initial_db.db.borrow_db().block_no; + // convert the prefix nibbles into a B256 key with zero padding + let start = B256::right_padding_from(&prefix.pack()); - debug!("getting next account: start={}", start); let address = provider - .get_next_account(&AccountRangeQuery::new(block_no, start)) - .context("debug_accountRange call failed")?; + .get_next_account(&NextAccountQuery { block_no, start }) + .context("getting next account failed")?; + ensure!( + Nibbles::unpack(keccak256(address)).starts_with(&prefix), + "invalid provider response: address doesn't match prefix" + ); provider .get_proof(&ProofQuery { @@ -281,33 +287,40 @@ impl> PreflightDB { .context("eth_getProof call failed") } - /// Fetches EIP-1186 proofs for the next storage slots of a given account. + /// Retrieves an EIP-1186 account proof for storage slots whose key hashes match the given + /// prefixes. /// - /// This method retrieves an [EIP1186AccountProofResponse] for multiple storage slots of a given - /// account. For each `B256` key provided in the `starts` iterator, the method finds the next - /// storage slot whose hashed index lexicographically follows the given key. The proofs are - /// generated for the block `block_count` after the currently configured block in the provider. - pub fn get_next_slot_proofs( + /// For each prefix provided, the method finds the first storage slot whose key, when hashed + /// with keccak256, starts with the prefix. Then, it retrieves and returns an + /// [EIP1186AccountProofResponse] of all those storage slots in the given account. 
+    pub fn get_storage_proofs_with_prefix(
         &mut self,
-        block_count: u64,
         address: Address,
-        starts: impl IntoIterator<Item = B256>,
+        prefixes: impl IntoIterator<Item = Nibbles>,
     ) -> anyhow::Result<EIP1186AccountProofResponse> {
         let initial_db = self.inner.db.db.borrow_mut();
         let provider_db = initial_db.db.borrow_db();
         let mut provider = provider_db.provider.borrow_mut();
-        let block_no = initial_db.db.borrow_db().block_no + block_count - 1;
+        let block_no = initial_db.db.borrow_db().block_no;
 
         let mut indices = BTreeSet::new();
-        for start in starts {
-            debug!(
-                "getting next storage key: address={},start={}",
-                address, start
+        for prefix in prefixes {
+            // convert the prefix nibbles into a B256 key with zero padding
+            let start = B256::right_padding_from(&prefix.pack());
+            let key: B256 = provider
+                .get_next_slot(&NextSlotQuery {
+                    block_no,
+                    address,
+                    start,
+                })
+                .context("getting next storage key failed")?
+                .into();
+            ensure!(
+                Nibbles::unpack(keccak256(key)).starts_with(&prefix),
+                "invalid provider response: storage key doesn't match prefix"
             );
-            let slot = provider
-                .get_next_slot(&StorageRangeQuery::new(block_no, address, start))
-                .context("debug_storageRangeAt call failed")?;
-            indices.insert(B256::from(slot));
+
+            indices.insert(B256::from(key));
         }
 
         provider
diff --git a/crates/preflight/src/provider/cache_provider.rs b/crates/preflight/src/provider/cache_provider.rs
index 10f27a4a..101e2a43 100644
--- a/crates/preflight/src/provider/cache_provider.rs
+++ b/crates/preflight/src/provider/cache_provider.rs
@@ -189,7 +189,7 @@ impl Provider for CachedRpcProvider {
         Ok(out)
     }
 
-    fn get_next_account(&mut self, query: &AccountRangeQuery) -> anyhow::Result<Address> {
+    fn get_next_account(&mut self, query: &NextAccountQuery) -> anyhow::Result<Address> {
         let cache_out = self.cache.get_next_account(query);
         if cache_out.is_ok() {
             return cache_out;
@@ -201,7 +201,7 @@ impl Provider for CachedRpcProvider {
         Ok(out)
     }
 
-    fn get_next_slot(&mut self, query: &StorageRangeQuery) -> anyhow::Result<U256> {
+    fn get_next_slot(&mut self, query: &NextSlotQuery) -> anyhow::Result<U256> {
         let cache_out = self.cache.get_next_slot(query);
         if cache_out.is_ok() {
             return cache_out;
diff --git a/crates/preflight/src/provider/file_provider.rs b/crates/preflight/src/provider/file_provider.rs
index 546088bf..dac8042d 100644
--- a/crates/preflight/src/provider/file_provider.rs
+++ b/crates/preflight/src/provider/file_provider.rs
@@ -14,8 +14,8 @@
 use super::{ordered_map, MutProvider, Provider};
 use crate::provider::query::{
-    AccountQuery, AccountRangeQuery, BlockQuery, PreimageQuery, ProofQuery, StorageQuery,
-    StorageRangeQuery, UncleQuery,
+    AccountQuery, BlockQuery, NextAccountQuery, NextSlotQuery, PreimageQuery, ProofQuery,
+    StorageQuery, UncleQuery,
 };
 use alloy::network::Network;
 use alloy::primitives::{Address, Bytes, U256};
@@ -64,9 +64,9 @@ pub struct FileProvider {
     #[serde(with = "ordered_map", default)]
     preimages: HashMap<PreimageQuery, Bytes>,
     #[serde(with = "ordered_map", default)]
-    next_accounts: HashMap<AccountRangeQuery, Address>,
+    next_accounts: HashMap<NextAccountQuery, Address>,
     #[serde(with = "ordered_map", default)]
-    next_slots: HashMap<StorageRangeQuery, U256>,
+    next_slots: HashMap<NextSlotQuery, U256>,
 }
 
 impl FileProvider {
@@ -244,14 +244,14 @@ impl Provider for FileProvider {
         }
     }
 
-    fn get_next_account(&mut self, query: &AccountRangeQuery) -> anyhow::Result<Address> {
+    fn get_next_account(&mut self, query: &NextAccountQuery) -> anyhow::Result<Address> {
         match self.next_accounts.get(query) {
             Some(val) => Ok(*val),
             None => Err(anyhow!("No data for {:?}", query)),
         }
     }
 
-    fn get_next_slot(&mut self, query: &StorageRangeQuery) -> anyhow::Result<U256> {
+    fn get_next_slot(&mut self, query: &NextSlotQuery) -> anyhow::Result<U256> {
         match self.next_slots.get(query) {
             Some(val) => Ok(*val),
             None => Err(anyhow!("No data for {:?}", query)),
@@ -314,12 +314,12 @@ impl MutProvider for FileProvider {
         self.dirty = true
     }
 
-    fn insert_next_account(&mut self, query: AccountRangeQuery, val: Address) {
+    fn insert_next_account(&mut self, query: NextAccountQuery, val: Address) {
         self.next_accounts.insert(query, val);
         self.dirty = true;
     }
 
-    fn insert_next_slot(&mut self, query: StorageRangeQuery, val: U256) {
+    fn insert_next_slot(&mut self, query: NextSlotQuery, val: U256) {
         self.next_slots.insert(query, val);
         self.dirty = true;
     }
diff --git a/crates/preflight/src/provider/mod.rs b/crates/preflight/src/provider/mod.rs
index 05c4f66e..47abbee1 100644
--- a/crates/preflight/src/provider/mod.rs
+++ b/crates/preflight/src/provider/mod.rs
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::provider::query::{AccountRangeQuery, PreimageQuery, StorageRangeQuery};
+use crate::provider::query::{NextAccountQuery, NextSlotQuery, PreimageQuery};
 use alloy::network::Network;
 use alloy::primitives::map::HashMap;
 use alloy::primitives::{Address, Bytes, B256, U256};
@@ -96,8 +96,8 @@ pub trait Provider: Send {
     fn get_code(&mut self, query: &AccountQuery) -> anyhow::Result<Bytes>;
     fn get_storage(&mut self, query: &StorageQuery) -> anyhow::Result<U256>;
     fn get_preimage(&mut self, query: &PreimageQuery) -> anyhow::Result<Bytes>;
-    fn get_next_account(&mut self, query: &AccountRangeQuery) -> anyhow::Result<Address>;
-    fn get_next_slot(&mut self, query: &StorageRangeQuery) -> anyhow::Result<U256>;
+    fn get_next_account(&mut self, query: &NextAccountQuery) -> anyhow::Result<Address>;
+    fn get_next_slot(&mut self, query: &NextSlotQuery) -> anyhow::Result<U256>;
 }
 
 pub trait MutProvider: Provider {
@@ -112,8 +112,8 @@
     fn insert_code(&mut self, query: AccountQuery, val: Bytes);
     fn insert_storage(&mut self, query: StorageQuery, val: U256);
     fn insert_preimage(&mut self, query: PreimageQuery, val: Bytes);
-    fn insert_next_account(&mut self, query: AccountRangeQuery, val: Address);
-    fn insert_next_slot(&mut self, query: StorageRangeQuery, val: U256);
+    fn insert_next_account(&mut self, query: NextAccountQuery, val: Address);
+    fn insert_next_slot(&mut self, query: NextSlotQuery, val: U256);
 }
 
 pub fn get_proofs(
diff --git a/crates/preflight/src/provider/query.rs b/crates/preflight/src/provider/query.rs
index 0307fafd..1f166469 100644
--- a/crates/preflight/src/provider/query.rs
+++ b/crates/preflight/src/provider/query.rs
@@ -53,33 +53,15 @@ pub struct PreimageQuery {
 }
 
 #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Deserialize, Serialize)]
-pub struct AccountRangeQuery {
+pub struct NextAccountQuery {
     pub block_no: u64,
     pub start: B256,
-    pub max_results: u64,
-    pub no_code: bool,
-    pub no_storage: bool,
-    pub incompletes: bool,
-}
-
-impl AccountRangeQuery {
-    pub fn new(block_no: u64, start: B256) -> Self {
-        Self {
-            block_no,
-            start,
-            max_results: 1,
-            no_code: true,
-            no_storage: true,
-            incompletes: true,
-        }
-    }
 }
 
 #[derive(Clone, Debug, Default, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct AccountRangeQueryResponse {
-    pub root: B256,
-    pub accounts: HashMap,
+    pub accounts: HashMap,
     #[serde(skip_serializing_if = "Option::is_none", default)]
     pub next: Option,
 }
 
@@ -88,32 +70,19 @@ pub struct AccountRangeQueryResponse {
 #[serde(rename_all = "camelCase")]
 pub struct AccountRangeQueryResponseEntry {
     pub balance: U256,
-    pub nonce: U256,
+    #[serde(with = "alloy::serde::quantity")]
+    pub nonce: u64,
     pub root: B256,
     pub code_hash: B256,
-    pub address: Address,
+    pub address: Option<Address>
, pub key: B256, } #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Deserialize, Serialize)] -pub struct StorageRangeQuery { +pub struct NextSlotQuery { pub block_no: u64, - pub tx_index: u64, pub address: Address, pub start: B256, - pub max_results: u64, -} - -impl StorageRangeQuery { - pub fn new(block_no: u64, address: Address, start: B256) -> Self { - Self { - block_no, - tx_index: 0, - address, - start, - max_results: 1, - } - } } #[derive(Clone, Debug, Default, Serialize, Deserialize)] @@ -125,7 +94,8 @@ pub struct StorageRangeQueryResponse { } #[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] pub struct StorageRangeQueryResponseEntry { - pub key: U256, + pub key: Option, pub value: U256, } diff --git a/crates/preflight/src/provider/rpc_provider.rs b/crates/preflight/src/provider/rpc_provider.rs index 9c91d694..fc705f39 100644 --- a/crates/preflight/src/provider/rpc_provider.rs +++ b/crates/preflight/src/provider/rpc_provider.rs @@ -12,16 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::provider::query::{AccountRangeQueryResponse, StorageRangeQueryResponse}; use crate::provider::*; +use alloy::eips::BlockId; use alloy::network::{BlockResponse, HeaderResponse, Network}; use alloy::providers::{Provider as AlloyProvider, ProviderBuilder, RootProvider}; use alloy::rpc::client::RpcClient; use alloy::transports::{ http::{Client, Http}, layers::{RetryBackoffLayer, RetryBackoffService}, + TransportResult, }; -use anyhow::anyhow; +use anyhow::{anyhow, ensure, Context}; use log::{debug, error}; use std::future::IntoFuture; @@ -47,6 +48,126 @@ impl RpcProvider { tokio_handle, }) } + + async fn account_range( + &self, + block: impl Into, + start: B256, + limit: u64, + incomplete: bool, + ) -> TransportResult { + self.http_client + .client() + .request( + "debug_accountRange", + (block.into(), start, limit, true, true, incomplete), + ) + .await + } + + async fn storage_range_at( + &self, + block_hash: B256, + tx_index: u64, + address: Address, + key_start: B256, + limit: u64, + ) -> TransportResult { + self.http_client + .client() + .request( + "debug_storageRangeAt", + (block_hash, tx_index, address, key_start, limit), + ) + .await + } + + #[cfg(not(feature = "erigon-range-queries"))] + fn get_next_storage_key( + &self, + block: B256, + address: Address, + key_start: B256, + ) -> anyhow::Result { + let out = self + .tokio_handle + .block_on( + self.storage_range_at(block, 0, address, key_start, 1) + .into_future(), + ) + .context("debug_storageRangeAt failed")?; + + let (hash, entry) = out.storage.iter().next().context("no such storage slot")?; + // Perform simple sanity checks, as this RPC is known to be wonky. 
+ ensure!( + *hash >= key_start && out.next_key.map_or(true, |next| next > *hash), + "invalid debug_storageRangeAt response" + ); + let key = entry.key.context("preimage storage key is missing")?; + + Ok(key.0.into()) + } + + #[cfg(feature = "erigon-range-queries")] + fn get_next_storage_key( + &self, + block: B256, + address: Address, + key_start: B256, + ) -> anyhow::Result { + let mut min_found: Option<(B256, B256)> = None; // (hash, key) + + let mut paging_start = B256::ZERO; + const PAGE_SIZE: u64 = 125_000; // will result in responses < 25 MB + let mut page_count = 1; + + log::warn!( + "Querying all storage for address {} to find the next slot...(This might take a while)", + address, + ); + loop { + let out = self + .tokio_handle + .block_on( + self.storage_range_at(block, 0, address, paging_start, PAGE_SIZE) + .into_future(), + ) + .context("debug_storageRangeAt failed")?; + + for (hash, entry) in out.storage { + let Some(alloy::serde::JsonStorageKey(key)) = entry.key else { + anyhow::bail!("preimage storage key is missing"); + }; + + if hash >= key_start { + match min_found { + None => min_found = Some((hash, key)), + Some((min_hash, _)) => { + if hash < min_hash { + min_found = Some((hash, key)); + } + } + } + } + } + + match out.next_key { + Some(next_key) => { + let next_key_u256: U256 = next_key.into(); + debug!( + "Finished querying storage range {} / {}", + page_count, + U256::MAX.div_ceil(next_key_u256 / U256::from(page_count)) + ); + paging_start = next_key; + page_count += 1; + } + None => break, + } + } + + Ok(min_found.context("no such storage key")?.1.into()) + } } impl Provider for RpcProvider { @@ -216,62 +337,89 @@ impl Provider for RpcProvider { } } - fn get_next_account(&mut self, query: &AccountRangeQuery) -> anyhow::Result
<Address> {
-        let out: AccountRangeQueryResponse = match self.tokio_handle.block_on(
-            self.http_client
-                .client()
-                .request(
-                    "debug_accountRange",
-                    (
-                        format!("{:066x}", query.block_no),
-                        format!("{}", query.start),
-                        query.max_results,
-                        query.no_code,
-                        query.no_storage,
-                        query.incompletes,
-                    ),
-                )
-                .into_future(),
-        ) {
-            Ok(out) => out,
-            Err(e) => {
-                error!("debug_accountRange: {e}");
-                anyhow::bail!(e)
-            }
-        };
+    fn get_next_account(&mut self, query: &NextAccountQuery) -> anyhow::Result<Address> {
+        debug!("Querying RPC for next account: {:?}", query);
 
-        Ok(*out.accounts.keys().next().unwrap())
+        let out = self
+            .tokio_handle
+            .block_on(
+                self.account_range(query.block_no, query.start, 1, true)
+                    .into_future(),
+            )
+            .context("debug_accountRange failed")?;
+        let entry = out.accounts.values().next().context("no such account")?;
+        // Perform simple sanity checks, as this RPC is known to be wonky.
+        ensure!(
+            entry.key >= query.start,
+            "invalid debug_accountRange response"
+        );
+
+        entry.address.context("preimage address is missing")
     }
 
-    fn get_next_slot(&mut self, query: &StorageRangeQuery) -> anyhow::Result<U256> {
-        let block = self.get_full_block(&BlockQuery {
-            block_no: query.block_no,
-        })?;
-        let hash = block.header().hash();
+    fn get_next_slot(&mut self, query: &NextSlotQuery) -> anyhow::Result<U256> {
+        debug!("Querying RPC for next storage key: {:?}", query);
 
-        let out: StorageRangeQueryResponse = match self.tokio_handle.block_on(
-            self.http_client
-                .client()
-                .request(
-                    "debug_storageRangeAt",
-                    (
-                        // format!("{:#066x}", query.block_no),
-                        format!("{hash}"),
-                        query.tx_index,
-                        query.address,
-                        format!("{}", query.start),
-                        query.max_results,
-                    ),
-                )
-                .into_future(),
-        ) {
-            Ok(out) => out,
-            Err(e) => {
-                error!("debug_storageRangeAt: {e}");
-                anyhow::bail!(e)
-            }
-        };
+        // debug_storageRangeAt returns the storage at the given block height and transaction index.
+        // For this to be consistent with eth_getProof, we need to query the state after all
+        // transactions have been processed, i.e. at transaction index 0 of the next block.
+        let block_no = query.block_no + 1;
+
+        // debug_storageRangeAt only accepts the block hash, not the number, so we need to query it.
+        let block = self
+            .tokio_handle
+            .block_on(self.http_client.get_block_by_number(block_no.into(), false))
+            .context("eth_getBlockByNumber failed")?
+ .with_context(|| format!("block {} not found", block_no))?; + let block_hash = block.header().hash(); - Ok(out.storage.values().next().unwrap().key) + self.get_next_storage_key(block_hash, query.address, query.start) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy::network::Ethereum; + use alloy::primitives::address; + use tokio::task::spawn_blocking; + + #[tokio::test] + #[ignore = "Requires RPC node and credentials"] + async fn get_next_slot() -> anyhow::Result<()> { + let rpc_url = std::env::var("ETH_RPC_URL").expect("ETH_RPC_URL not set"); + + let mut provider = RpcProvider::::new(rpc_url)?; + + let latest = provider.http_client.get_block_number().await?; + spawn_blocking(move || { + provider.get_next_slot(&NextSlotQuery { + block_no: latest - 1, + address: address!("0xdAC17F958D2ee523a2206206994597C13D831ec7"), + start: B256::ZERO, + }) + }) + .await??; + + Ok(()) + } + + #[tokio::test] + #[ignore = "Requires RPC node and credentials"] + async fn get_next_account() -> anyhow::Result<()> { + let rpc_url = std::env::var("ETH_RPC_URL").expect("ETH_RPC_URL not set"); + + let mut provider = RpcProvider::::new(rpc_url)?; + + let latest = provider.http_client.get_block_number().await?; + spawn_blocking(move || { + provider.get_next_account(&NextAccountQuery { + block_no: latest, + start: B256::ZERO, + }) + }) + .await??; + + Ok(()) } } diff --git a/guests/reth-ethereum/src/main.rs b/guests/reth-ethereum/src/main.rs index 630abccf..e1603144 100644 --- a/guests/reth-ethereum/src/main.rs +++ b/guests/reth-ethereum/src/main.rs @@ -33,10 +33,6 @@ fn main() { ) .expect("Failed to load client data from stdin"); let validation_depth = stateless_client_data.blocks.len() as u64; - assert!( - stateless_client_data.chain.is_ethereum(), - "This program only supports Ethereum chains" - ); let chain_id = stateless_client_data.chain as u64; // Build the block env::log("Validating blocks"); diff --git a/justfile b/justfile index 1e3afc01..90593ae0 100644 --- a/justfile +++ b/justfile @@ -53,7 +53,7 @@ test-cache-eth +ARGS="": (build ARGS) RUST_LOG=info ./target/debug/zeth-ethereum build --cache=bin/ethereum/data -b=9069000 RUST_LOG=info ./target/debug/zeth-ethereum build --cache=bin/ethereum/data -b=9200000 RUST_LOG=info ./target/debug/zeth-ethereum build --cache=bin/ethereum/data -b=12244000 - # RUST_LOG=info ./target/debug/zeth-ethereum build --cache=bin/ethereum/data -b=12965000 + RUST_LOG=info ./target/debug/zeth-ethereum build --cache=bin/ethereum/data -b=12965000 RUST_LOG=info ./target/debug/zeth-ethereum build --cache=bin/ethereum/data -b=13773000 RUST_LOG=info ./target/debug/zeth-ethereum build --cache=bin/ethereum/data -b=15050000 RUST_LOG=info ./target/debug/zeth-ethereum build --cache=bin/ethereum/data -b=15537394 diff --git a/testing/ef-tests/src/provider.rs b/testing/ef-tests/src/provider.rs index c9f85df7..6b7cf031 100644 --- a/testing/ef-tests/src/provider.rs +++ b/testing/ef-tests/src/provider.rs @@ -11,17 +11,11 @@ use alloy_trie::proof::ProofRetainer; use anyhow::anyhow; use nybbles::Nibbles; use reth_chainspec::NamedChain; -use std::{ - collections::{ - BTreeMap, - Bound::{Excluded, Unbounded}, - }, - iter, vec, -}; +use std::{collections::BTreeMap, iter, vec}; use zeth_preflight::provider::{ query::{ - AccountQuery, AccountRangeQuery, BlockQuery, PreimageQuery, ProofQuery, StorageQuery, - StorageRangeQuery, UncleQuery, + AccountQuery, BlockQuery, NextAccountQuery, NextSlotQuery, PreimageQuery, ProofQuery, + StorageQuery, UncleQuery, }, Provider, }; @@ -158,14 +152,14 @@ 
impl Provider for TestProvider {
         unimplemented!("get_preimage")
     }
 
-    fn get_next_account(&mut self, query: &AccountRangeQuery) -> anyhow::Result<Address> {
+    fn get_next_account(&mut self, query: &NextAccountQuery) -> anyhow::Result<Address> {
         assert_eq!(query.block_no, 0);
         self.pre
             .get_next_account(query.start)
             .ok_or(anyhow!("no next account"))
     }
 
-    fn get_next_slot(&mut self, query: &StorageRangeQuery) -> anyhow::Result<U256> {
+    fn get_next_slot(&mut self, query: &NextSlotQuery) -> anyhow::Result<U256> {
         assert_eq!(query.block_no, 0);
         let next = self
             .pre
@@ -232,7 +226,7 @@ impl ProviderState {
     }
 
     fn get_next_account(&self, start: B256) -> Option<Address> {
-        let next = self.0.range((Excluded(start), Unbounded)).next();
+        let next = self.0.range(start..).next();
         next.map(|(_, v)| v.address)
     }
 
@@ -240,7 +234,7 @@ impl ProviderState {
         let Some(account) = self.0.get(&keccak256(address)) else {
             return None;
         };
-        let next = account.storage.range((Excluded(start), Unbounded)).next();
+        let next = account.storage.range(start..).next();
         next.map(|(_, v)| v.0)
     }
 }