Fix : snos orch blob data (#176)
* fix : removed error return in create_job

* fix : removed error return in create_job

* fix : tests

* feat : updated da blob creation code

* feat : updated da blob creation code

* feat : added tests for refactor_state_update function and refactors

* chore : refactor

* resolved comments

* chore : refactor

---------

Co-authored-by: Heemank Verma <heemankv@gmail.com>
ocdbytes and heemankv authored Nov 6, 2024
1 parent ac131a5 commit 803be5f
Showing 6 changed files with 200 additions and 16 deletions.
1 change: 0 additions & 1 deletion .env.test
@@ -75,7 +75,6 @@ L1_CORE_CONTRACT_ADDRESS="0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057"
## This is needed right now because Madara doesn't support getProof
RPC_FOR_SNOS="http://localhost:9545"


##### STARKNET SETTLEMENT #####
STARKNET_PRIVATE_KEY=0x76f2ccdb23f29bc7b69278e947c01c6160a31cf02c19d06d0f6e5ab1d768b86
STARKNET_ACCOUNT_ADDRESS=0x3bb306a004034dba19e6cf7b161e7a4fef64bc1078419e8ad1876192f0b8cd1
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -71,6 +71,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

## Fixed

- blob data formation process from state update
- OTEL config refactor
- indexing for get_jobs_without_successor
- wait for transaction logic in ethereum settlement client
135 changes: 120 additions & 15 deletions crates/orchestrator/src/jobs/da_job/mod.rs
@@ -396,25 +396,25 @@ fn da_word(class_flag: bool, nonce_change: Option<Felt>, num_changes: u64) -> Felt {
}

fn refactor_state_update(state_update: &mut StateDiff) {
-    let addresses_in_nonces: Vec<Felt> = state_update.nonces.clone().iter().map(|item| item.contract_address).collect();
-    let addresses_in_storage_diffs: Vec<Felt> =
-        state_update.storage_diffs.clone().iter().map(|item| item.address).collect();
-
-    let address_to_insert = find_unique_addresses(addresses_in_nonces, addresses_in_storage_diffs);
-
-    for address in address_to_insert {
-        state_update.storage_diffs.push(ContractStorageDiffItem { address, storage_entries: vec![] })
-    }
-}
-
-fn find_unique_addresses(nonce_addresses: Vec<Felt>, storage_diff_addresses: Vec<Felt>) -> Vec<Felt> {
-    let storage_set: HashSet<_> = storage_diff_addresses.into_iter().collect();
-
-    nonce_addresses.into_iter().filter(|addr| !storage_set.contains(addr)).collect()
+    let existing_storage: HashSet<_> = state_update.storage_diffs.iter().map(|item| item.address).collect();
+
+    // Collect new addresses, using a HashSet for deduplication
+    let new_addresses: HashSet<_> = Iterator::chain(
+        state_update.nonces.iter().map(|item| item.contract_address),
+        state_update.deployed_contracts.iter().map(|item| item.address),
+    )
+    .filter(|address| !existing_storage.contains(address))
+    .collect();
+
+    // Add new storage diffs in batch
+    state_update.storage_diffs.extend(
+        new_addresses.into_iter().map(|address| ContractStorageDiffItem { address, storage_entries: Vec::new() }),
+    );
}
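To make the change above concrete, here is a minimal, self-contained sketch (not part of the commit) of the behavior the new `refactor_state_update` provides, assuming the function is in scope. An address that appears only under `nonces` or `deployed_contracts` gets an empty `storage_diffs` entry appended, which is exactly what the old code missed for `deployed_contracts`. Types and field names follow the `starknet-core` imports used in the test module below; the addresses are arbitrary.

```rust
use std::collections::HashSet;

use starknet::core::types::{ContractStorageDiffItem, DeployedContractItem, Felt, NonceUpdate, StateDiff};

fn main() {
    // Address 1 already has a storage diff; 2 appears only in `nonces`;
    // 3 appears only in `deployed_contracts`.
    let mut state_diff = StateDiff {
        storage_diffs: vec![ContractStorageDiffItem { address: Felt::from(1u8), storage_entries: vec![] }],
        deprecated_declared_classes: vec![],
        declared_classes: vec![],
        deployed_contracts: vec![DeployedContractItem {
            address: Felt::from(3u8),
            class_hash: Default::default(),
        }],
        replaced_classes: vec![],
        nonces: vec![NonceUpdate { contract_address: Felt::from(2u8), nonce: Felt::from(10u8) }],
    };

    refactor_state_update(&mut state_diff);

    // All three addresses now carry a storage-diff entry, and none is duplicated.
    let unique: HashSet<Felt> = state_diff.storage_diffs.iter().map(|item| item.address).collect();
    assert_eq!(unique.len(), 3);
    assert_eq!(state_diff.storage_diffs.len(), 3);
}
```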

#[cfg(test)]
pub mod test {
use std::collections::HashSet;
use std::fs;
use std::fs::File;
use std::io::Read;
@@ -427,12 +427,14 @@ pub mod test {
use majin_blob_types::serde;
use rstest::rstest;
use serde_json::json;
-use starknet::core::types::{Felt, StateUpdate};
+use starknet::core::types::{
+    ContractStorageDiffItem, DeployedContractItem, Felt, NonceUpdate, StateDiff, StateUpdate, StorageEntry,
+};
use starknet::providers::jsonrpc::HttpTransport;
use starknet::providers::JsonRpcClient;
use url::Url;

-use crate::jobs::da_job::da_word;
+use crate::jobs::da_job::{da_word, refactor_state_update};

/// Tests `da_word` function with various inputs for class flag, new nonce, and number of
/// changes. Verifies that `da_word` produces the correct Felt based on the provided
@@ -486,6 +488,16 @@ pub mod test {
"src/tests/jobs/da_job/test_data/test_blob/671070.txt",
"src/tests/jobs/da_job/test_data/nonces/671070.txt"
)]
// Block from a Pragma Madara and orchestrator test run. Here we hit an issue where
// the blob-building logic failed to pick up contract addresses from the
// `deployed_contracts` field of the state diff in the state update. This test
// case was added after the fix.
#[case(
178,
"src/tests/jobs/da_job/test_data/state_update/178.txt",
"src/tests/jobs/da_job/test_data/test_blob/178.txt",
"src/tests/jobs/da_job/test_data/nonces/178.txt"
)]
#[tokio::test]
async fn test_state_update_to_blob_data(
#[case] block_no: u64,
@@ -570,6 +582,47 @@ pub mod test {
assert_eq!(data, deserialize_data);
}

#[rstest]
#[case::empty_case(vec![], vec![], vec![], 0)]
#[case::only_nonces(
vec![(Felt::from(1), Felt::from(10)), (Felt::from(2), Felt::from(20))],
vec![],
vec![],
2
)]
#[case::only_deployed(
vec![],
vec![],
vec![(Felt::from(1), vec![1]), (Felt::from(2), vec![2])],
2
)]
#[case::overlapping_addresses(
vec![(Felt::from(1), Felt::from(10))],
vec![(Felt::from(1), vec![(Felt::from(1), Felt::from(100))])],
vec![(Felt::from(1), vec![1])],
1
)]
#[case::duplicate_addresses(
vec![(Felt::from(1), Felt::from(10)), (Felt::from(1), Felt::from(20))],
vec![],
vec![(Felt::from(1), vec![1]), (Felt::from(1), vec![2])],
1
)]
fn test_refactor_state_update(
#[case] nonces: Vec<(Felt, Felt)>,
#[case] storage_diffs: Vec<(Felt, Vec<(Felt, Felt)>)>,
#[case] deployed_contracts: Vec<(Felt, Vec<u8>)>,
#[case] expected_storage_count: usize,
) {
let mut state_diff = create_state_diff(nonces, storage_diffs.clone(), deployed_contracts);
let initial_storage = state_diff.storage_diffs.clone();

refactor_state_update(&mut state_diff);

assert!(verify_addresses_have_storage_diffs(&state_diff, &initial_storage));
verify_unique_addresses(&state_diff, expected_storage_count);
}

pub(crate) fn read_state_update_from_file(file_path: &str) -> Result<StateUpdate> {
// let file_path = format!("state_update_block_no_{}.txt", block_no);
let mut file = File::open(file_path)?;
@@ -616,4 +669,56 @@ pub mod test {
new_hex_chars = new_hex_chars.trim_start_matches('0').to_string();
if new_hex_chars.is_empty() { "0x0".to_string() } else { format!("0x{}", new_hex_chars) }
}

fn create_state_diff(
nonces: Vec<(Felt, Felt)>,
storage_diffs: Vec<(Felt, Vec<(Felt, Felt)>)>,
deployed_contracts: Vec<(Felt, Vec<u8>)>,
) -> StateDiff {
StateDiff {
nonces: nonces.into_iter().map(|(addr, nonce)| NonceUpdate { contract_address: addr, nonce }).collect(),
storage_diffs: storage_diffs
.into_iter()
.map(|(addr, entries)| ContractStorageDiffItem {
address: addr,
storage_entries: entries.into_iter().map(|(key, value)| StorageEntry { key, value }).collect(),
})
.collect(),
deprecated_declared_classes: vec![],
declared_classes: vec![],
deployed_contracts: deployed_contracts
.into_iter()
.map(|(addr, _class_hash)| DeployedContractItem { address: addr, class_hash: Default::default() })
.collect(),
replaced_classes: vec![],
}
}

fn verify_unique_addresses(state_diff: &StateDiff, expected_count: usize) {
let unique_addresses: HashSet<_> = state_diff.storage_diffs.iter().map(|item| &item.address).collect();

assert_eq!(unique_addresses.len(), state_diff.storage_diffs.len(), "Storage diffs contain duplicate addresses");
assert_eq!(unique_addresses.len(), expected_count, "Unexpected number of storage diffs");
}

fn verify_addresses_have_storage_diffs(
state_diff: &StateDiff,
initial_storage: &Vec<ContractStorageDiffItem>,
) -> bool {
for orig_storage in initial_storage {
if let Some(current_storage) =
state_diff.storage_diffs.iter().find(|item| item.address == orig_storage.address)
{
assert_eq!(
orig_storage.storage_entries, current_storage.storage_entries,
"Storage entries changed unexpectedly"
);
}
}

let storage_addresses: HashSet<_> = state_diff.storage_diffs.iter().map(|item| &item.address).collect();

state_diff.nonces.iter().all(|item| storage_addresses.contains(&item.contract_address))
&& state_diff.deployed_contracts.iter().all(|item| storage_addresses.contains(&item.address))
}
}
10 changes: 10 additions & 0 deletions crates/orchestrator/src/tests/jobs/da_job/test_data/nonces/178.txt
@@ -0,0 +1,10 @@
[
{
"nonce": "0",
"address": "2087021424722619777119509474943472645767659996348769578120564519014510906823"
},
{
"nonce": "0",
"address": "2227221089168209069826941066512062806409572016263164839237971044383978786453"
}
]
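For reference, a fixture shaped like the above can be loaded with plain `serde_json`; the `NonceFixture` struct and `read_nonce_fixture` helper below are hypothetical sketches that only mirror the file's field names, not the repository's actual test harness.

```rust
use serde::Deserialize;

// Hypothetical shape mirroring the fixture's fields; the real harness may differ.
#[derive(Debug, Deserialize)]
struct NonceFixture {
    nonce: String,   // decimal string, e.g. "0"
    address: String, // decimal string contract address
}

fn read_nonce_fixture(path: &str) -> Result<Vec<NonceFixture>, Box<dyn std::error::Error>> {
    Ok(serde_json::from_str(&std::fs::read_to_string(path)?)?)
}
```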
68 changes: 68 additions & 0 deletions crates/orchestrator/src/tests/jobs/da_job/test_data/state_update/178.txt
@@ -0,0 +1,68 @@
{
"block_hash": "0x385bbbcaada37fbd2be407d02479a86faf632294161451159433f1687afa80a",
"old_root": "0x39cc0b3f01d1b4b06a57f1e77d0be8093b9be560380a5b88bc2fefd9932d129",
"new_root": "0x249a0f2b937b67ec50021fe7520937d3b8f94564e6d1f935d3b009bbbc45645",
"state_diff": {
"storage_diffs": [
{
"address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7",
"storage_entries": [
{
"key": "0x68803f24598877e0945e64be0baadefff792986d2c5a2b6e7c8aa334d1c0e52",
"value": "0xf43fc2c03458b02"
},
{
"key": "0x709c6298f62a9011c05499b9f5ccce4ecc3e0753e48096edef484c409c25181",
"value": "0x189f07e"
}
]
},
{
"address": "0x4ec8ffda0fb4db938740a4fa54b3962852b8af5cf323b3d5f6f7ee102258e95",
"storage_entries": [
{
"key": "0x2b1577440dd7bedf920cb6de2f9fc6bf7ba98c78c85a3fa1f8311aac95e1759",
"value": "0x7c"
},
{
"key": "0x3b56d7f084d750efc42861fede4548333aff235c695a18e4747698157962a0b",
"value": "0x3c3af083273bcb2bedc6d7f38055d79a"
},
{
"key": "0x3b56d7f084d750efc42861fede4548333aff235c695a18e4747698157962a0a",
"value": "0x3a4ab88085f30eb952f488e972dc0035"
}
]
},
{
"address": "0x1",
"storage_entries": [
{
"key": "0xa8",
"value": "0x2ba8421b4e16ae6c77ba5d61446527e4320d2a57465f11147386a3b2f5f7758"
}
]
}
],
"deprecated_declared_classes": [],
"declared_classes": [
{
"class_hash": "0x29ed6994318833535323c098faa060a20120dc763c281dbe5fb9541a7eaf6c5",
"compiled_class_hash": "0xb0ea1eb7b2bbd82380f7b1237ea2fb047719137bf2a7d019dd5e468accc007"
}
],
"deployed_contracts": [
{
"address": "0x4220016ed41491dce777cc24efc06a1e88722c3e70506313f504573bce64627",
"class_hash": "0x29ed6994318833535323c098faa060a20120dc763c281dbe5fb9541a7eaf6c5"
}
],
"replaced_classes": [],
"nonces": [
{
"contract_address": "0x4fe5eea46caa0a1f344fafce82b39d66b552f00d3cd12e89073ef4b4ab37860",
"nonce": "0xd0"
}
]
}
}
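A fixture like this one can be deserialized straight into starknet-rs types. The sketch below is presumably close to what `read_state_update_from_file` in the test module does (its body is collapsed in the diff above); the error type here is an assumption.

```rust
use starknet::core::types::StateUpdate;

// Sketch of a loader for this fixture; hex-encoded fields parse into `Felt` via serde.
fn read_state_update(path: &str) -> Result<StateUpdate, Box<dyn std::error::Error>> {
    let contents = std::fs::read_to_string(path)?;
    Ok(serde_json::from_str::<StateUpdate>(&contents)?)
}
```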

1 change: 1 addition & 0 deletions crates/orchestrator/src/tests/jobs/da_job/test_data/test_blob/178.txt
Large diffs are not rendered by default.
