Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix: SNOS orchestrator blob data #176

Merged
merged 12 commits into from
Nov 6, 2024
2 changes: 1 addition & 1 deletion .env.test
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ L1_CORE_CONTRACT_ADDRESS="0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057"

##### SNOS #####
## This is needed right now because Madara doesn't support getProof
RPC_FOR_SNOS=""
RPC_FOR_SNOS="http://localhost:9944"


##### STARKNET SETTLEMENT #####
Expand Down
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

## Changed

- removed error return in case of JobAlreadyExists in `create_job` function
- update_job returns the updated job item
- made create_job atomic to avoid race conditions
- handle jobs in tokio tasks
Expand All @@ -68,6 +69,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

## Fixed

- blob data formation process from state update
- OTEL config refactor
- indexing for get_jobs_without_successor
- wait for transaction logic in ethereum settlement client
Expand Down
142 changes: 129 additions & 13 deletions crates/orchestrator/src/jobs/da_job/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -396,25 +396,33 @@ fn da_word(class_flag: bool, nonce_change: Option<Felt>, num_changes: u64) -> Fe
}

fn refactor_state_update(state_update: &mut StateDiff) {
apoorvsadana marked this conversation as resolved.
Show resolved Hide resolved
let addresses_in_nonces: Vec<Felt> = state_update.nonces.clone().iter().map(|item| item.contract_address).collect();
let addresses_in_storage_diffs: Vec<Felt> =
state_update.storage_diffs.clone().iter().map(|item| item.address).collect();
// Create set of all addresses that need storage diffs
let mut required_addresses: HashSet<Felt> = HashSet::new();

let address_to_insert = find_unique_addresses(addresses_in_nonces, addresses_in_storage_diffs);
// First add all existing storage addresses
required_addresses.extend(state_update.storage_diffs.iter().map(|item| item.address));

for address in address_to_insert {
state_update.storage_diffs.push(ContractStorageDiffItem { address, storage_entries: vec![] })
}
}
// Add addresses from nonces
required_addresses.extend(state_update.nonces.iter().map(|item| item.contract_address));

fn find_unique_addresses(nonce_addresses: Vec<Felt>, storage_diff_addresses: Vec<Felt>) -> Vec<Felt> {
let storage_set: HashSet<_> = storage_diff_addresses.into_iter().collect();
// Add addresses from deployed contracts
required_addresses.extend(state_update.deployed_contracts.iter().map(|item| item.address));

nonce_addresses.into_iter().filter(|addr| !storage_set.contains(addr)).collect()
// Create a set of existing storage addresses for quick lookup
let existing_storage_addresses: HashSet<Felt> =
state_update.storage_diffs.iter().map(|item| item.address).collect();

// Add storage diffs only for addresses that don't already have them
for address in required_addresses {
if !existing_storage_addresses.contains(&address) {
state_update.storage_diffs.push(ContractStorageDiffItem { address, storage_entries: Vec::new() });
}
}
}

#[cfg(test)]
pub mod test {
use std::collections::HashSet;
use std::fs;
use std::fs::File;
use std::io::Read;
Expand All @@ -427,12 +435,14 @@ pub mod test {
use majin_blob_types::serde;
use rstest::rstest;
use serde_json::json;
use starknet::core::types::{Felt, StateUpdate};
use starknet::core::types::{
ContractStorageDiffItem, DeployedContractItem, Felt, NonceUpdate, StateDiff, StateUpdate, StorageEntry,
};
use starknet::providers::jsonrpc::HttpTransport;
use starknet::providers::JsonRpcClient;
use url::Url;

use crate::jobs::da_job::da_word;
use crate::jobs::da_job::{da_word, refactor_state_update};

/// Tests `da_word` function with various inputs for class flag, new nonce, and number of
/// changes. Verifies that `da_word` produces the correct Felt based on the provided
Expand Down Expand Up @@ -486,6 +496,16 @@ pub mod test {
"src/tests/jobs/da_job/test_data/test_blob/671070.txt",
"src/tests/jobs/da_job/test_data/nonces/671070.txt"
)]
// Block from pragma madara and orch test run. Here we faced an issue where our
// blob building logic was not able to take the contract addresses from
// `deployed_contracts` field in state diff from state update. This test case
// was added after the fix
#[case(
178,
"src/tests/jobs/da_job/test_data/state_update/178.txt",
"src/tests/jobs/da_job/test_data/test_blob/178.txt",
"src/tests/jobs/da_job/test_data/nonces/178.txt"
)]
#[tokio::test]
async fn test_state_update_to_blob_data(
#[case] block_no: u64,
Expand Down Expand Up @@ -570,6 +590,71 @@ pub mod test {
assert_eq!(data, deserialize_data);
}

#[rstest]
#[case(vec![], vec![], vec![], 0)] // Empty case
#[case(
vec![(Felt::from(1), Felt::from(10)), (Felt::from(2), Felt::from(20))],
vec![],
vec![],
2
)] // Only nonces
#[case(
vec![],
vec![],
vec![(Felt::from(1), vec![1]), (Felt::from(2), vec![2])],
2
)] // Only deployed
#[case(
vec![(Felt::from(1), Felt::from(10))],
vec![(Felt::from(1), vec![(Felt::from(1), Felt::from(100))])],
vec![(Felt::from(1), vec![1])],
1
)] // Overlapping addresses
#[case(
vec![(Felt::from(1), Felt::from(10)), (Felt::from(1), Felt::from(20))],
vec![],
vec![(Felt::from(1), vec![1]), (Felt::from(1), vec![2])],
1
)] // Duplicate addresses
fn test_refactor_state_update(
#[case] nonces: Vec<(Felt, Felt)>,
#[case] storage_diffs: Vec<(Felt, Vec<(Felt, Felt)>)>,
#[case] deployed_contracts: Vec<(Felt, Vec<u8>)>,
#[case] expected_storage_count: usize,
) {
// Create initial state
let mut state_diff = create_state_diff(nonces, storage_diffs.clone(), deployed_contracts);
let initial_storage = state_diff.storage_diffs.clone();

// Run the function
refactor_state_update(&mut state_diff);

// Verify results
assert!(verify_addresses_have_storage_diffs(&state_diff));

// Verify no duplicate addresses in storage_diffs
let unique_addresses: HashSet<_> = state_diff.storage_diffs.iter().map(|item| &item.address).collect();
apoorvsadana marked this conversation as resolved.
Show resolved Hide resolved
assert_eq!(
unique_addresses.len(),
state_diff.storage_diffs.len(),
"Number of unique addresses should match number of storage diffs"
);
assert_eq!(
unique_addresses.len(),
expected_storage_count,
"Number of storage diffs should match expected count"
);

// Verify storage preservation for existing entries
for orig_storage in initial_storage {
apoorvsadana marked this conversation as resolved.
Show resolved Hide resolved
if let Some(current_storage) =
state_diff.storage_diffs.iter().find(|item| item.address == orig_storage.address)
{
assert_eq!(orig_storage.storage_entries, current_storage.storage_entries);
}
}
}

pub(crate) fn read_state_update_from_file(file_path: &str) -> Result<StateUpdate> {
// let file_path = format!("state_update_block_no_{}.txt", block_no);
let mut file = File::open(file_path)?;
Expand Down Expand Up @@ -616,4 +701,35 @@ pub mod test {
new_hex_chars = new_hex_chars.trim_start_matches('0').to_string();
if new_hex_chars.is_empty() { "0x0".to_string() } else { format!("0x{}", new_hex_chars) }
}

fn create_state_diff(
nonces: Vec<(Felt, Felt)>,
storage_diffs: Vec<(Felt, Vec<(Felt, Felt)>)>,
deployed_contracts: Vec<(Felt, Vec<u8>)>,
) -> StateDiff {
StateDiff {
nonces: nonces.into_iter().map(|(addr, nonce)| NonceUpdate { contract_address: addr, nonce }).collect(),
storage_diffs: storage_diffs
.into_iter()
.map(|(addr, entries)| ContractStorageDiffItem {
address: addr,
storage_entries: entries.into_iter().map(|(key, value)| StorageEntry { key, value }).collect(),
})
.collect(),
deprecated_declared_classes: vec![],
declared_classes: vec![],
deployed_contracts: deployed_contracts
.into_iter()
.map(|(addr, _code)| DeployedContractItem { address: addr, class_hash: Default::default() })
Mohiiit marked this conversation as resolved.
Show resolved Hide resolved
.collect(),
replaced_classes: vec![],
}
}

fn verify_addresses_have_storage_diffs(state_diff: &StateDiff) -> bool {
Mohiiit marked this conversation as resolved.
Show resolved Hide resolved
let storage_addresses: HashSet<_> = state_diff.storage_diffs.iter().map(|item| &item.address).collect();

state_diff.nonces.iter().all(|item| storage_addresses.contains(&item.contract_address))
&& state_diff.deployed_contracts.iter().all(|item| storage_addresses.contains(&item.address))
}
}
3 changes: 2 additions & 1 deletion crates/orchestrator/src/jobs/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,8 @@ pub async fn create_job(

// this is technically a redundant check, we've another check inside `create_job`
if existing_job.is_some() {
return Err(JobError::JobAlreadyExists { internal_id, job_type });
log::warn!("Job already exists for internal_id {internal_id:?} and job_type {job_type:?}. Skipping!");
return Ok(());
}

let job_handler = factory::get_job_handler(&job_type).await;
Expand Down
10 changes: 10 additions & 0 deletions crates/orchestrator/src/tests/jobs/da_job/test_data/nonces/178.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
[
{
"nonce": "0",
"address": "2087021424722619777119509474943472645767659996348769578120564519014510906823"
},
{
"nonce": "0",
"address": "2227221089168209069826941066512062806409572016263164839237971044383978786453"
}
]
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
{
"block_hash": "0x385bbbcaada37fbd2be407d02479a86faf632294161451159433f1687afa80a",
"old_root": "0x39cc0b3f01d1b4b06a57f1e77d0be8093b9be560380a5b88bc2fefd9932d129",
"new_root": "0x249a0f2b937b67ec50021fe7520937d3b8f94564e6d1f935d3b009bbbc45645",
"state_diff": {
"storage_diffs": [
{
"address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7",
"storage_entries": [
{
"key": "0x68803f24598877e0945e64be0baadefff792986d2c5a2b6e7c8aa334d1c0e52",
"value": "0xf43fc2c03458b02"
},
{
"key": "0x709c6298f62a9011c05499b9f5ccce4ecc3e0753e48096edef484c409c25181",
"value": "0x189f07e"
}
]
},
{
"address": "0x4ec8ffda0fb4db938740a4fa54b3962852b8af5cf323b3d5f6f7ee102258e95",
"storage_entries": [
{
"key": "0x2b1577440dd7bedf920cb6de2f9fc6bf7ba98c78c85a3fa1f8311aac95e1759",
"value": "0x7c"
},
{
"key": "0x3b56d7f084d750efc42861fede4548333aff235c695a18e4747698157962a0b",
"value": "0x3c3af083273bcb2bedc6d7f38055d79a"
},
{
"key": "0x3b56d7f084d750efc42861fede4548333aff235c695a18e4747698157962a0a",
"value": "0x3a4ab88085f30eb952f488e972dc0035"
}
]
},
{
"address": "0x1",
"storage_entries": [
{
"key": "0xa8",
"value": "0x2ba8421b4e16ae6c77ba5d61446527e4320d2a57465f11147386a3b2f5f7758"
}
]
}
],
"deprecated_declared_classes": [],
"declared_classes": [
{
"class_hash": "0x29ed6994318833535323c098faa060a20120dc763c281dbe5fb9541a7eaf6c5",
"compiled_class_hash": "0xb0ea1eb7b2bbd82380f7b1237ea2fb047719137bf2a7d019dd5e468accc007"
}
],
"deployed_contracts": [
{
"address": "0x4220016ed41491dce777cc24efc06a1e88722c3e70506313f504573bce64627",
"class_hash": "0x29ed6994318833535323c098faa060a20120dc763c281dbe5fb9541a7eaf6c5"
}
],
"replaced_classes": [],
"nonces": [
{
"contract_address": "0x4fe5eea46caa0a1f344fafce82b39d66b552f00d3cd12e89073ef4b4ab37860",
"nonce": "0xd0"
}
]
}
}

Large diffs are not rendered by default.

4 changes: 0 additions & 4 deletions crates/orchestrator/src/tests/jobs/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,10 +98,6 @@ async fn create_job_job_exists_in_db_works() {
let database_client = services.config.database();
database_client.create_job(job_item.clone()).await.unwrap();

assert!(
create_job(JobType::ProofCreation, "0".to_string(), HashMap::new(), services.config.clone()).await.is_err()
);

// Waiting for 5 secs for message to be passed into the queue
sleep(Duration::from_secs(5)).await;

Expand Down
2 changes: 1 addition & 1 deletion madara
2 changes: 1 addition & 1 deletion pathfinder
apoorvsadana marked this conversation as resolved.
Show resolved Hide resolved
Submodule pathfinder updated 42 files
+26 −16 crates/common/src/error.rs
+6 −0 crates/common/src/receipt.rs
+164 −2 crates/common/src/state_update.rs
+18 −14 crates/executor/src/types.rs
+2 −0 crates/gateway-types/src/reply.rs
+14 −0 crates/make-stream/src/lib.rs
+27 −4 crates/merkle-tree/src/class.rs
+53 −5 crates/merkle-tree/src/contract.rs
+9 −8 crates/merkle-tree/src/contract_state.rs
+207 −120 crates/merkle-tree/src/tree.rs
+1 −1 crates/p2p/src/builder.rs
+48 −42 crates/p2p/src/client/conv.rs
+52 −13 crates/p2p/src/client/peer_agnostic.rs
+12 −2 crates/p2p/src/client/peer_agnostic/fixtures.rs
+1 −0 crates/p2p_proto/proto/class.proto
+1 −0 crates/p2p_proto/proto/transaction.proto
+26 −5 crates/p2p_proto/src/class.rs
+25 −20 crates/p2p_proto/src/transaction.rs
+2 −2 crates/pathfinder/examples/re_execute.rs
+7 −1 crates/pathfinder/src/p2p_network/sync_handlers.rs
+3 −3 crates/pathfinder/src/p2p_network/sync_handlers/tests.rs
+0 −1 crates/pathfinder/src/state.rs
+7 −18 crates/pathfinder/src/state/sync.rs
+4 −4 crates/pathfinder/src/sync.rs
+7 −12 crates/pathfinder/src/sync/checkpoint.rs
+27 −0 crates/pathfinder/src/sync/checkpoint/fixture.rs
+4 −0 crates/pathfinder/src/sync/error.rs
+209 −8 crates/pathfinder/src/sync/state_updates.rs
+2 −6 crates/pathfinder/src/sync/track.rs
+23 −6 crates/rpc/src/dto/fee.rs
+11 −5 crates/rpc/src/dto/receipt.rs
+4 −5 crates/rpc/src/lib.rs
+120 −80 crates/rpc/src/method/estimate_fee.rs
+12 −8 crates/rpc/src/method/estimate_message_fee.rs
+76 −92 crates/rpc/src/method/get_storage_proof.rs
+33 −17 crates/rpc/src/pathfinder/methods/get_proof.rs
+46 −31 crates/rpc/src/v06/method/estimate_fee.rs
+12 −8 crates/rpc/src/v06/method/estimate_message_fee.rs
+4 −4 crates/rpc/src/v06/method/simulate_transactions.rs
+1 −0 crates/rpc/src/v08.rs
+1 −0 crates/storage/src/connection/transaction.rs
+1 −0 crates/storage/src/test_utils.rs
Loading