diff --git a/Cargo.lock b/Cargo.lock index 0b4891540..7a1e339b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1800,6 +1800,7 @@ dependencies = [ "metrics-exporter-prometheus", "metrics-util", "prover-services", + "rand", "regex", "reqwest", "reth-primitives", @@ -1908,7 +1909,7 @@ dependencies = [ [[package]] name = "citrea-e2e" version = "0.1.0" -source = "git+https://github.com/chainwayxyz/citrea-e2e?rev=f75fe92#f75fe92b0594724b9785eff857bb6aff861a2a55" +source = "git+https://github.com/chainwayxyz/citrea-e2e?rev=ab88a607ca5aad2245d3c54f8822e02a1b71f50e#ab88a607ca5aad2245d3c54f8822e02a1b71f50e" dependencies = [ "anyhow", "async-trait", @@ -2039,6 +2040,7 @@ dependencies = [ "metrics", "metrics-derive", "once_cell", + "rand", "sov-db", "sov-ledger-rpc", "sov-mock-da", diff --git a/Cargo.toml b/Cargo.toml index 4bb5e4b9d..c6654f34d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -154,7 +154,7 @@ alloy-eips = { version = "0.4.2", default-features = false } alloy-consensus = { version = "0.4.2", default-features = false, features = ["serde", "serde-bincode-compat"] } alloy-network = { version = "0.4.2", default-features = false } -citrea-e2e = { git = "https://github.com/chainwayxyz/citrea-e2e", rev = "f75fe92" } +citrea-e2e = { git = "https://github.com/chainwayxyz/citrea-e2e", rev = "ab88a607ca5aad2245d3c54f8822e02a1b71f50e" } [patch.crates-io] bitcoincore-rpc = { version = "0.18.0", git = "https://github.com/chainwayxyz/rust-bitcoincore-rpc.git", rev = "ca3cfa2" } diff --git a/bin/citrea/Cargo.toml b/bin/citrea/Cargo.toml index 84eab5078..0428ad5e8 100644 --- a/bin/citrea/Cargo.toml +++ b/bin/citrea/Cargo.toml @@ -81,6 +81,7 @@ bincode = { workspace = true } borsh = { workspace = true } hex = { workspace = true } jmt = { workspace = true } +rand = { workspace = true } reqwest = { workspace = true } risc0-zkvm = { workspace = true, default-features = false, features = ["std"] } rs_merkle = { workspace = true } diff --git a/bin/citrea/src/rollup/mod.rs b/bin/citrea/src/rollup/mod.rs index 8e36c8f91..ce328412f 100644 --- a/bin/citrea/src/rollup/mod.rs +++ b/bin/citrea/src/rollup/mod.rs @@ -12,6 +12,7 @@ use citrea_sequencer::CitreaSequencer; use jsonrpsee::RpcModule; use sov_db::ledger_db::migrations::LedgerDBMigrator; use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; +use sov_db::mmr_db::MmrDB; use sov_db::rocks_db_config::RocksdbConfig; use sov_db::schema::types::SoftConfirmationNumber; use sov_modules_api::Spec; @@ -449,6 +450,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { None, ); let ledger_db = self.create_ledger_db(&rocksdb_config); + let mmr_db = MmrDB::new(&rocksdb_config)?; let prover_service = self .create_prover_service( @@ -500,6 +502,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { batch_prover_code_commitments_by_spec, light_client_prover_code_commitment, light_client_prover_elfs, + mmr_db, task_manager, )?; diff --git a/bin/citrea/tests/bitcoin_e2e/light_client_test.rs b/bin/citrea/tests/bitcoin_e2e/light_client_test.rs index 30a4bb50b..2f494d49c 100644 --- a/bin/citrea/tests/bitcoin_e2e/light_client_test.rs +++ b/bin/citrea/tests/bitcoin_e2e/light_client_test.rs @@ -6,6 +6,7 @@ use alloy_primitives::U64; use async_trait::async_trait; use bitcoin_da::service::{BitcoinService, BitcoinServiceConfig, FINALITY_DEPTH}; use bitcoin_da::spec::{BitcoinSpec, RollupParams}; +use bitcoincore_rpc::RpcApi; use citrea_batch_prover::rpc::BatchProverRpcClient; use citrea_batch_prover::GroupCommitments; use citrea_common::tasks::manager::TaskManager; @@ -19,6 +20,7 @@ use 
citrea_e2e::test_case::{TestCase, TestCaseRunner}; use citrea_e2e::Result; use citrea_light_client_prover::rpc::LightClientProverRpcClient; use citrea_primitives::{TO_BATCH_PROOF_PREFIX, TO_LIGHT_CLIENT_PREFIX}; +use rand::{thread_rng, Rng}; use risc0_zkvm::{FakeReceipt, InnerReceipt, MaybePruned, Receipt, ReceiptClaim}; use sov_ledger_rpc::LedgerRpcClient; use sov_rollup_interface::da::{BatchProofMethodId, DaTxRequest}; @@ -829,6 +831,8 @@ impl TestCase for LightClientUnverifiableBatchProofTest { [1u8; 32], fork1_height + 1, method_ids[1].1, + None, + false, ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) @@ -840,6 +844,8 @@ impl TestCase for LightClientUnverifiableBatchProofTest { [3u8; 32], fork1_height * 3, method_ids[1].1, + None, + false, ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) @@ -847,13 +853,14 @@ impl TestCase for LightClientUnverifiableBatchProofTest { .unwrap(); // Expect unparsable journal to be skipped - let unparsable_batch_proof = - create_serialized_fake_receipt_batch_proof_with_malformed_journal( - [3u8; 32], - [5u8; 32], - fork1_height * 4, - method_ids[1].1, - ); + let unparsable_batch_proof = create_serialized_fake_receipt_batch_proof( + [3u8; 32], + [5u8; 32], + fork1_height * 4, + method_ids[1].1, + None, + true, + ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unparsable_batch_proof), 1) .await @@ -864,6 +871,8 @@ impl TestCase for LightClientUnverifiableBatchProofTest { [2u8; 32], fork1_height * 2, method_ids[1].1, + None, + false, ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) @@ -877,6 +886,8 @@ impl TestCase for LightClientUnverifiableBatchProofTest { [4u8; 32], fork1_height * 4, random_method_id, + None, + false, ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unverifiable_batch_proof), 1) @@ -923,40 +934,385 @@ async fn test_light_client_unverifiable_batch_proof() -> Result<()> { .await } -fn create_serialized_fake_receipt_batch_proof( - initial_state_root: [u8; 32], - final_state_root: [u8; 32], - last_l2_height: u64, - method_id: [u32; 8], -) -> Vec { - let batch_proof_output = BatchProofCircuitOutput:: { - initial_state_root, - final_state_root, - last_l2_height, - da_slot_hash: [0u8; 32].into(), - prev_soft_confirmation_hash: [0u8; 32], - final_soft_confirmation_hash: [0u8; 32], - state_diff: BTreeMap::new(), - sequencer_commitments_range: (0, 0), - sequencer_da_public_key: [0u8; 32].to_vec(), - sequencer_public_key: [0u8; 32].to_vec(), - preproven_commitments: vec![], - }; - let output_serialized = borsh::to_vec(&batch_proof_output).unwrap(); +#[derive(Default)] +struct VerifyChunkedTxsInLightClient { + task_manager: TaskManager<()>, +} - let claim = MaybePruned::Value(ReceiptClaim::ok(method_id, output_serialized.clone())); - let fake_receipt = FakeReceipt::new(claim); - // Receipt with verifiable claim - let receipt = Receipt::new(InnerReceipt::Fake(fake_receipt), output_serialized.clone()); - bincode::serialize(&receipt).unwrap() +#[async_trait] +impl TestCase for VerifyChunkedTxsInLightClient { + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_light_client_prover: true, + ..Default::default() + } + } + + fn light_client_prover_config() -> LightClientProverConfig { + LightClientProverConfig { + enable_recovery: false, + initial_da_height: 171, + ..Default::default() + } + } 
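// A minimal sketch of the chunk arithmetic the test below relies on: a proof whose
// compressed size exceeds the max tx body size is split into ceil(len / max_body) chunks
// plus one aggregate, and each chunk/aggregate is sent as a commit + reveal pair.
// The helper name is illustrative, and the ~39700-byte limit is the testing-feature value
// quoted in the comments below, not a constant defined in this patch.
fn expected_mempool_txs(compressed_len: usize, max_body_size: usize) -> usize {
    let chunks = compressed_len.div_ceil(max_body_size);
    // a commit + reveal pair for every chunk and for the final aggregate
    (chunks + 1) * 2
}
// e.g. ~100 KB compressed -> 3 chunks + 1 aggregate = 8 txs, ~130 KB -> 4 chunks + 1 aggregate = 10 txs,
// matching the wait_mempool_len(8) and wait_mempool_len(10) expectations in run_test below.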
+ + fn sequencer_config() -> SequencerConfig { + SequencerConfig { + min_soft_confirmations_per_commitment: 10000, + ..Default::default() + } + } + + async fn cleanup(&self) -> Result<()> { + self.task_manager.abort().await; + Ok(()) + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + let da = f.bitcoin_nodes.get(0).unwrap(); + let light_client_prover = f.light_client_prover.as_ref().unwrap(); + + let da_config = &da.config; + let bitcoin_da_service_config = BitcoinServiceConfig { + node_url: format!( + "http://127.0.0.1:{}/wallet/{}", + da_config.rpc_port, + NodeKind::Bitcoin + ), + node_username: da_config.rpc_user.clone(), + node_password: da_config.rpc_password.clone(), + network: bitcoin::Network::Regtest, + da_private_key: Some( + // This is the regtest private key of batch prover + "56D08C2DDE7F412F80EC99A0A328F76688C904BD4D1435281EFC9270EC8C8707".to_string(), + ), + tx_backup_dir: Self::test_config() + .dir + .join("tx_backup_dir") + .display() + .to_string(), + monitoring: Default::default(), + mempool_space_url: None, + }; + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + + let bitcoin_da_service = Arc::new( + BitcoinService::new_with_wallet_check( + bitcoin_da_service_config, + RollupParams { + to_light_client_prefix: TO_LIGHT_CLIENT_PREFIX.to_vec(), + to_batch_proof_prefix: TO_BATCH_PROOF_PREFIX.to_vec(), + }, + tx, + ) + .await + .unwrap(), + ); + + self.task_manager + .spawn(|tk| bitcoin_da_service.clone().run_da_queue(rx, tk)); + + da.generate(FINALITY_DEPTH).await?; + let finalized_height = da.get_finalized_height().await?; + + // Wait for light client prover to create light client proof. + light_client_prover + .wait_for_l1_height(finalized_height, Some(TEN_MINS)) + .await + .unwrap(); + + // Expect light client prover to have generated light client proof + let lcp = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(finalized_height) + .await?; + let lcp_output = lcp.unwrap().light_client_proof_output; + + // Get initial method ids and genesis state root + let method_ids = lcp_output.batch_proof_method_ids; + let genesis_state_root = lcp_output.state_root; + + let fork1_height = method_ids[1].0; + + // Even though the state diff is 100kb the proof will be 200kb because the fake receipt claim also has the journal + // But the compressed size will go down to 100kb + let state_diff_100kb = create_random_state_diff(100); + + // Create a 100kb (compressed size) batch proof (not 1mb because if testing feature is enabled max body size is 39700), this batch proof will consist of 3 chunk and 1 aggregate transactions because 100kb/40kb = 3 chunks + let verifiable_100kb_batch_proof = create_serialized_fake_receipt_batch_proof( + genesis_state_root, + [1u8; 32], + fork1_height + 1, + method_ids[1].1, + Some(state_diff_100kb.clone()), + false, + ); + + let _ = bitcoin_da_service + .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_100kb_batch_proof), 1) + .await + .unwrap(); + + // In total 3 chunks 1 aggregate with all of them having reveal and commit txs we should have 8 txs in mempool + da.wait_mempool_len(8, Some(TEN_MINS)).await?; + + // Finalize the DA block which contains the batch proof txs + da.generate(FINALITY_DEPTH).await?; + + // Make sure all of them are in the block + da.wait_mempool_len(0, Some(TEN_MINS)).await?; + + let batch_proof_l1_height = da.get_finalized_height().await?; + + // Wait for light client prover to process verifiable batch proof + light_client_prover + 
.wait_for_l1_height(batch_proof_l1_height, Some(TEN_MINS)) + .await + .unwrap(); + + // Expect light client prover to have generated light client proof + let lcp = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(batch_proof_l1_height) + .await?; + + let lcp_output = lcp.unwrap().light_client_proof_output; + + // The batch proof should have updated the state root and the last l2 height + assert_eq!(lcp_output.state_root, [1u8; 32]); + assert_eq!(lcp_output.last_l2_height, fork1_height + 1); + assert!(lcp_output.unchained_batch_proofs_info.is_empty()); + + // Now generate another proof but this time: + // Have 4 chunks and 1 aggregate + // First two chunks should be in block n + // Last two chunks should be in block n+1 + // And the aggregate should be in block n+2 + // After the block n+2 is processed we should see the state root updated + let state_diff_130kb = create_random_state_diff(130); + + let verifiable_130kb_batch_proof = create_serialized_fake_receipt_batch_proof( + [1u8; 32], + [2u8; 32], + fork1_height * 2, + method_ids[1].1, + Some(state_diff_130kb), + false, + ); + + let _ = bitcoin_da_service + .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_130kb_batch_proof), 1) + .await + .unwrap(); + + // In total 4 chunks 1 aggregate with all of them having reveal and commit txs we should have 10 txs in mempool + da.wait_mempool_len(10, Some(TEN_MINS)).await?; + + // Get txs from mempool + let txs = da.get_raw_mempool().await?; + + // Get the first four txs (first two chunks) + let first_two_chunks = txs[0..4] + .iter() + .map(|txid| txid.to_string()) + .collect::>(); + let last_two_chunks = txs[4..8] + .iter() + .map(|txid| txid.to_string()) + .collect::>(); + let aggregate = txs[8..10] + .iter() + .map(|txid| txid.to_string()) + .collect::>(); + + let addr = da + .get_new_address(None, None) + .await? 
+ .assume_checked() + .to_string(); + + da.generate_block(addr.clone(), first_two_chunks).await?; + // First two chunks should be in block n + da.wait_mempool_len(6, Some(TEN_MINS)).await?; + + da.generate_block(addr.clone(), last_two_chunks).await?; + // Last two chunks should be in block n+1 + da.wait_mempool_len(2, Some(TEN_MINS)).await?; + + da.generate_block(addr.clone(), aggregate).await?; + // Aggregate should be in block n+2 + da.wait_mempool_len(0, Some(TEN_MINS)).await?; + + // Finalize the DA block which contains the aggregate txs + da.generate(FINALITY_DEPTH - 1).await?; + + let batch_proof_l1_height = da.get_finalized_height().await?; + + // Wait for light client prover to process verifiable batch proof + light_client_prover + .wait_for_l1_height(batch_proof_l1_height, Some(TEN_MINS)) + .await + .unwrap(); + + // Expect light client prover to have generated light client proof + let lcp_first_chunks = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(batch_proof_l1_height - 2) + .await?; + + let lcp_output = lcp_first_chunks.unwrap().light_client_proof_output; + + // The batch proof should not have updated the state root and the last l2 height because these are only the chunks + assert_eq!(lcp_output.state_root, [1u8; 32]); + assert_eq!(lcp_output.last_l2_height, fork1_height + 1); + assert!(lcp_output.unchained_batch_proofs_info.is_empty()); + // There are two chunks so the size should be 2 + assert_eq!(lcp_output.mmr_guest.size, 2); + + let lcp_last_chunks = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(batch_proof_l1_height - 1) + .await?; + + let lcp_output = lcp_last_chunks.unwrap().light_client_proof_output; + + // The batch proof should not have updated the state root and the last l2 height because these are only the chunks + assert_eq!(lcp_output.state_root, [1u8; 32]); + assert_eq!(lcp_output.last_l2_height, fork1_height + 1); + assert!(lcp_output.unchained_batch_proofs_info.is_empty()); + // There are now four chunks in total so the size should be 4 + assert_eq!(lcp_output.mmr_guest.size, 4); + + // Expect light client prover to have generated light client proof + let lcp_aggregate = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(batch_proof_l1_height) + .await?; + + let lcp_output = lcp_aggregate.unwrap().light_client_proof_output; + + // The batch proof should have updated the state root and the last l2 height + assert_eq!(lcp_output.state_root, [2u8; 32]); + assert_eq!(lcp_output.last_l2_height, fork1_height * 2); + assert!(lcp_output.unchained_batch_proofs_info.is_empty()); + + let random_method_id = [1u32; 8]; + + // This should result in 3 chunks and 1 aggregate tx + let unverifiable_100kb_batch_proof = create_serialized_fake_receipt_batch_proof( + [2u8; 32], + [3u8; 32], + fork1_height * 3, + random_method_id, + Some(state_diff_100kb), + false, + ); + let _ = bitcoin_da_service + .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unverifiable_100kb_batch_proof), 1) + .await + .unwrap(); + + // In total 3 chunks 1 aggregate with all of them having reveal and commit txs we should have 8 txs in mempool + da.wait_mempool_len(8, Some(TEN_MINS)).await?; + + // Finalize the DA block which contains the batch proof txs + da.generate(FINALITY_DEPTH).await?; + + // Make sure all of them are in the block + da.wait_mempool_len(0, Some(TEN_MINS)).await?; + + let batch_proof_l1_height = da.get_finalized_height().await?; + + // Wait for light client prover to 
process verifiable batch proof + light_client_prover + .wait_for_l1_height(batch_proof_l1_height, Some(TEN_MINS)) + .await + .unwrap(); + let lcp = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(batch_proof_l1_height) + .await?; + + let lcp_output = lcp.unwrap().light_client_proof_output; + + // The batch proof should NOT have updated the state root and the last l2 height + // Because it is not verified + assert_eq!(lcp_output.state_root, [2u8; 32]); + assert_eq!(lcp_output.last_l2_height, fork1_height * 2); + // Also should not leave unchained outputs + assert!(lcp_output.unchained_batch_proofs_info.is_empty()); + + Ok(()) + } +} + +#[tokio::test] +async fn test_verify_chunked_txs_in_light_client() -> Result<()> { + TestCaseRunner::new(VerifyChunkedTxsInLightClient::default()) + .set_citrea_path(get_citrea_path()) + .run() + .await +} + +pub fn create_random_state_diff(size_in_kb: u64) -> BTreeMap, Option>> { + let mut rng = thread_rng(); + let mut map = BTreeMap::new(); + let mut total_size: u64 = 0; + + // Convert size to bytes + let size_in_bytes = size_in_kb * 1024; + + while total_size < size_in_bytes { + // Generate a random 32-byte key + let key: Vec = (0..32).map(|_| rng.gen::()).collect(); + + // Randomly decide if the value is `None` or a `Vec` of random length + let value: Option> = if rng.gen_bool(0.1) { + None + } else { + let value_size: usize = rng.gen_range(1..=2048); + Some((0..value_size).map(|_| rng.gen::()).collect()) + }; + + // Calculate the size of the key and value + let key_size = key.len() as u64; + let value_size = match &value { + Some(v) => v.len() as u64 + 1, + None => 1, + }; + + // Add to the map + map.insert(key, value); + + // Update the total size + total_size += key_size + value_size; + } + + map } -fn create_serialized_fake_receipt_batch_proof_with_malformed_journal( +fn create_serialized_fake_receipt_batch_proof( initial_state_root: [u8; 32], final_state_root: [u8; 32], last_l2_height: u64, method_id: [u32; 8], + state_diff: Option, Option>>>, + malformed_journal: bool, ) -> Vec { + let sequencer_da_public_key = vec![ + 2, 88, 141, 32, 42, 252, 193, 238, 74, 181, 37, 76, 120, 71, 236, 37, 185, 161, 53, 187, + 218, 15, 43, 198, 158, 225, 167, 20, 116, 159, 215, 125, 201, + ]; + let sequencer_public_key = vec![ + 32, 64, 64, 227, 100, 193, 15, 43, 236, 156, 31, 229, 0, 161, 205, 76, 36, 124, 137, 214, + 80, 160, 30, 215, 232, 44, 171, 168, 103, 135, 124, 33, + ]; let batch_proof_output = BatchProofCircuitOutput:: { initial_state_root, final_state_root, @@ -964,26 +1320,22 @@ fn create_serialized_fake_receipt_batch_proof_with_malformed_journal( da_slot_hash: [0u8; 32].into(), prev_soft_confirmation_hash: [0u8; 32], final_soft_confirmation_hash: [0u8; 32], - state_diff: BTreeMap::new(), + state_diff: state_diff.unwrap_or_default(), sequencer_commitments_range: (0, 0), - sequencer_da_public_key: [0u8; 32].to_vec(), - sequencer_public_key: [0u8; 32].to_vec(), + sequencer_da_public_key, + sequencer_public_key, preproven_commitments: vec![], }; - let output_serialized = borsh::to_vec(&batch_proof_output).unwrap(); + let mut output_serialized = borsh::to_vec(&batch_proof_output).unwrap(); - let mut output_serialized_malformed = vec![1u8]; - output_serialized_malformed.extend(output_serialized.clone()); + // Distorts the output and make it unparsable + if malformed_journal { + output_serialized.push(1u8); + } - let claim = MaybePruned::Value(ReceiptClaim::ok( - method_id, - output_serialized_malformed.clone(), - )); + let 
claim = MaybePruned::Value(ReceiptClaim::ok(method_id, output_serialized.clone())); let fake_receipt = FakeReceipt::new(claim); // Receipt with verifiable claim - let receipt = Receipt::new( - InnerReceipt::Fake(fake_receipt), - output_serialized_malformed.clone(), - ); + let receipt = Receipt::new(InnerReceipt::Fake(fake_receipt), output_serialized); bincode::serialize(&receipt).unwrap() } diff --git a/crates/bitcoin-da/src/helpers/builders/light_client_proof_namespace.rs b/crates/bitcoin-da/src/helpers/builders/light_client_proof_namespace.rs index be1477cc2..dd41db339 100644 --- a/crates/bitcoin-da/src/helpers/builders/light_client_proof_namespace.rs +++ b/crates/bitcoin-da/src/helpers/builders/light_client_proof_namespace.rs @@ -359,112 +359,151 @@ pub fn create_inscription_type_1( ); } // push end if - let reveal_script = reveal_script_builder.push_opcode(OP_ENDIF).into_script(); - - let (control_block, merkle_root, tapscript_hash) = - build_taproot(&reveal_script, public_key, SECP256K1); - - // create commit tx address - let commit_tx_address = Address::p2tr(SECP256K1, public_key, merkle_root, network); + let reveal_script_builder = reveal_script_builder.push_opcode(OP_ENDIF); + + // Start loop to find a 'nonce' i.e. random number that makes the reveal tx hash starting with zeros given length + let mut nonce: i64 = 16; // skip the first digits to avoid OP_PUSHNUM_X + loop { + if nonce % 1000 == 0 { + trace!(nonce, "Trying to find commit & reveal nonce for chunk"); + if nonce > 16384 { + warn!("Too many iterations finding nonce for chunk"); + } + } + // ownerships are moved to the loop + let mut reveal_script_builder = reveal_script_builder.clone(); + + // push nonce + reveal_script_builder = reveal_script_builder + .push_slice(nonce.to_le_bytes()) + // drop the second item, bc there is a big chance it's 0 (tx kind) and nonce is >= 16 + .push_opcode(OP_NIP); + nonce += 1; + + // finalize reveal script + let reveal_script = reveal_script_builder.into_script(); + + let (control_block, merkle_root, tapscript_hash) = + build_taproot(&reveal_script, public_key, SECP256K1); + + // create commit tx address + let commit_tx_address = Address::p2tr(SECP256K1, public_key, merkle_root, network); + + let reveal_value = REVEAL_OUTPUT_AMOUNT; + let fee = get_size_reveal( + change_address.script_pubkey(), + reveal_value, + &reveal_script, + &control_block, + ) as u64 + * reveal_fee_rate; + let reveal_input_value = fee + reveal_value; + + // build commit tx + let (unsigned_commit_tx, leftover_utxos) = build_commit_transaction( + prev_utxo.clone(), + utxos.clone(), + commit_tx_address.clone(), + change_address.clone(), + reveal_input_value, + commit_fee_rate, + )?; + + let output_to_reveal = unsigned_commit_tx.output[0].clone(); + + // If commit + let commit_change = if unsigned_commit_tx.output.len() > 1 { + Some(UTXO { + tx_id: unsigned_commit_tx.compute_txid(), + vout: 1, + address: None, + script_pubkey: unsigned_commit_tx.output[0].script_pubkey.to_hex_string(), + amount: unsigned_commit_tx.output[1].value.to_sat(), + confirmations: 0, + spendable: true, + solvable: true, + }) + } else { + None + }; + + let mut reveal_tx = build_reveal_transaction( + output_to_reveal.clone(), + unsigned_commit_tx.compute_txid(), + 0, + change_address.clone(), + reveal_value, + reveal_fee_rate, + &reveal_script, + &control_block, + )?; + + build_witness( + &unsigned_commit_tx, + &mut reveal_tx, + tapscript_hash, + reveal_script, + control_block, + &key_pair, + SECP256K1, + ); - let reveal_value = 
REVEAL_OUTPUT_AMOUNT; - let fee = get_size_reveal( - change_address.script_pubkey(), - reveal_value, - &reveal_script, - &control_block, - ) as u64 - * reveal_fee_rate; - let reveal_input_value = fee + reveal_value; + let reveal_wtxid = reveal_tx.compute_wtxid(); + let reveal_hash = reveal_wtxid.as_raw_hash().to_byte_array(); - // build commit tx - let (unsigned_commit_tx, leftover_utxos) = build_commit_transaction( - prev_utxo.clone(), - utxos, - commit_tx_address.clone(), - change_address.clone(), - reveal_input_value, - commit_fee_rate, - )?; + // check if first N bytes equal to the given prefix + if !reveal_hash.starts_with(reveal_tx_prefix) { + // try another nonce + continue; + } - let output_to_reveal = unsigned_commit_tx.output[0].clone(); + // check if inscription locked to the correct address + let recovery_key_pair = key_pair.tap_tweak(SECP256K1, merkle_root); + let (x_only_pub_key, _parity) = recovery_key_pair.to_inner().x_only_public_key(); + assert_eq!( + Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked(x_only_pub_key), + network, + ), + commit_tx_address + ); - // If commit - let commit_change = if unsigned_commit_tx.output.len() > 1 { - Some(UTXO { - tx_id: unsigned_commit_tx.compute_txid(), - vout: 1, + // set prev utxo to last reveal tx[0] to chain txs in order + prev_utxo = Some(UTXO { + tx_id: reveal_tx.compute_txid(), + vout: 0, + script_pubkey: reveal_tx.output[0].script_pubkey.to_hex_string(), address: None, - script_pubkey: unsigned_commit_tx.output[0].script_pubkey.to_hex_string(), - amount: unsigned_commit_tx.output[1].value.to_sat(), + amount: reveal_tx.output[0].value.to_sat(), confirmations: 0, spendable: true, solvable: true, - }) - } else { - None - }; + }); - let mut reveal_tx = build_reveal_transaction( - output_to_reveal.clone(), - unsigned_commit_tx.compute_txid(), - 0, - change_address.clone(), - reveal_value, - reveal_fee_rate, - &reveal_script, - &control_block, - )?; + commit_chunks.push(unsigned_commit_tx); + reveal_chunks.push(reveal_tx); - build_witness( - &unsigned_commit_tx, - &mut reveal_tx, - tapscript_hash, - reveal_script, - control_block, - &key_pair, - SECP256K1, - ); - - // check if inscription locked to the correct address - let recovery_key_pair = key_pair.tap_tweak(SECP256K1, merkle_root); - let (x_only_pub_key, _parity) = recovery_key_pair.to_inner().x_only_public_key(); - assert_eq!( - Address::p2tr_tweaked( - TweakedPublicKey::dangerous_assume_tweaked(x_only_pub_key), - network, - ), - commit_tx_address - ); + // Replace utxos with leftovers so we don't use prev utxos in next chunks + utxos = leftover_utxos; + if let Some(change) = commit_change { + utxos.push(change); + } - // set prev utxo to last reveal tx[0] to chain txs in order - prev_utxo = Some(UTXO { - tx_id: reveal_tx.compute_txid(), - vout: 0, - script_pubkey: reveal_tx.output[0].script_pubkey.to_hex_string(), - address: None, - amount: reveal_tx.output[0].value.to_sat(), - confirmations: 0, - spendable: true, - solvable: true, - }); - - commit_chunks.push(unsigned_commit_tx); - reveal_chunks.push(reveal_tx); - - // Replace utxos with leftovers so we don't use prev utxos in next chunks - utxos = leftover_utxos; - if let Some(change) = commit_change { - utxos.push(change); + break; } } - let reveal_tx_ids: Vec<_> = reveal_chunks + let (reveal_tx_ids, reveal_wtx_ids): (Vec<_>, Vec<_>) = reveal_chunks .iter() - .map(|tx| tx.compute_txid().to_byte_array()) + .map(|tx| { + ( + tx.compute_txid().to_byte_array(), + tx.compute_wtxid().to_byte_array(), + ) + }) 
.collect(); - let aggregate = DaDataLightClient::Aggregate(reveal_tx_ids); + let aggregate = DaDataLightClient::Aggregate(reveal_tx_ids, reveal_wtx_ids); // To sign the list of tx ids we assume they form a contigious list of bytes let reveal_body: Vec = @@ -500,9 +539,9 @@ pub fn create_inscription_type_1( let mut nonce: i64 = 16; // skip the first digits to avoid OP_PUSHNUM_X loop { if nonce % 1000 == 0 { - trace!(nonce, "Trying to find commit & reveal nonce"); + trace!(nonce, "Trying to find commit & reveal nonce for aggr"); if nonce > 16384 { - warn!("Too many iterations finding nonce"); + warn!("Too many iterations finding nonce for aggr"); } } let utxos = utxos.clone(); @@ -515,6 +554,7 @@ pub fn create_inscription_type_1( .push_slice(nonce.to_le_bytes()) // drop the second item, bc there is a big chance it's 0 (tx kind) and nonce is >= 16 .push_opcode(OP_NIP); + nonce += 1; // finalize reveal script let reveal_script = reveal_script_builder.into_script(); @@ -609,8 +649,6 @@ pub fn create_inscription_type_1( ); } } - - nonce += 1; } } diff --git a/crates/bitcoin-da/src/helpers/parsers.rs b/crates/bitcoin-da/src/helpers/parsers.rs index 8efd7a7fd..ed5f365fa 100644 --- a/crates/bitcoin-da/src/helpers/parsers.rs +++ b/crates/bitcoin-da/src/helpers/parsers.rs @@ -125,6 +125,18 @@ impl VerifyParsed for ParsedSequencerCommitment { } } +impl VerifyParsed for ParsedChunk { + fn public_key(&self) -> &[u8] { + unimplemented!("public_key call Should not be used with chunks") + } + fn signature(&self) -> &[u8] { + unimplemented!("signature call Should not be used with chunks") + } + fn body(&self) -> &[u8] { + &self.body + } +} + impl VerifyParsed for ParsedBatchProverMethodId { fn public_key(&self) -> &[u8] { &self.public_key @@ -432,6 +444,16 @@ mod light_client { } } + // Nonce + let _nonce = read_push_bytes(instructions)?; + if OP_NIP != read_opcode(instructions)? 
{ + return Err(ParserError::UnexpectedOpcode); + } + // END of transaction + if instructions.next().is_some() { + return Err(ParserError::UnexpectedOpcode); + } + let body_size: usize = chunks.iter().map(|c| c.len()).sum(); let mut body = Vec::with_capacity(body_size); for chunk in chunks { diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 5128ccb25..89f8b156b 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -18,7 +18,7 @@ use bitcoin::hashes::Hash; use bitcoin::secp256k1::SecretKey; use bitcoin::{Amount, BlockHash, CompactTarget, Transaction, Txid, Wtxid}; use bitcoincore_rpc::json::{SignRawTransactionInput, TestMempoolAcceptResult}; -use bitcoincore_rpc::{Auth, Client, Error, RpcApi, RpcError}; +use bitcoincore_rpc::{Auth, Client, Error as BitcoinError, Error, RpcApi, RpcError}; use borsh::BorshDeserialize; use citrea_primitives::compression::{compress_blob, decompress_blob}; use citrea_primitives::MAX_TXBODY_SIZE; @@ -586,7 +586,7 @@ impl BitcoinService { { bail!( "{}", - reject_reason.unwrap_or("[testmempoolaccept] Unkown rejection".to_string()) + reject_reason.unwrap_or("[testmempoolaccept] Unknown rejection".to_string()) ) } } @@ -754,6 +754,11 @@ impl DaService for BitcoinService { Ok(head_block_header.header) } + fn decompress_chunks(&self, complete_chunks: &[u8]) -> Result, Self::Error> { + borsh::from_slice(decompress_blob(complete_chunks).as_slice()) + .map_err(|_| anyhow!("Failed to parse complete chunks")) + } + async fn extract_relevant_zk_proofs( &self, block: &Self::FilteredBlock, @@ -814,7 +819,7 @@ impl DaService for BitcoinService { let mut body = Vec::new(); let data = DaDataLightClient::try_from_slice(&aggregate.body) .map_err(|e| anyhow!("{}: Failed to parse aggregate: {e}", tx_id))?; - let DaDataLightClient::Aggregate(chunk_ids) = data else { + let DaDataLightClient::Aggregate(chunk_ids, _wtx_ids) = data else { error!("{}: Aggregate: unexpected kind", tx_id); continue; }; @@ -830,12 +835,9 @@ impl DaService for BitcoinService { self.client .get_raw_transaction(&chunk_id, None) .await - .map_err(|e| { - use bitcoincore_rpc::Error; - match e { - Error::Io(_) => backoff::Error::transient(e), - _ => backoff::Error::permanent(e), - } + .map_err(|e| match e { + BitcoinError::Io(_) => backoff::Error::transient(e), + _ => backoff::Error::permanent(e), }) }) .await; @@ -847,6 +849,7 @@ impl DaService for BitcoinService { } } }; + let wrapped: TransactionWrapper = tx_raw.into(); let parsed = match parse_light_client_transaction(&wrapped) { Ok(r) => r, @@ -989,6 +992,7 @@ impl DaService for BitcoinService { let mut relevant_txs = vec![]; for tx in &completeness_proof { + let wtxid = tx.compute_wtxid(); match namespace { DaNamespace::ToBatchProver => { if let Ok(tx) = parse_batch_proof_transaction(tx) { @@ -999,6 +1003,7 @@ impl DaService for BitcoinService { seq_comm.body, seq_comm.public_key, hash, + None, ); relevant_txs.push(relevant_tx); @@ -1013,9 +1018,12 @@ impl DaService for BitcoinService { ParsedLightClientTransaction::Complete(complete) => { if let Some(hash) = complete.get_sig_verified_hash() { let blob = decompress_blob(&complete.body); - let relevant_tx = - BlobWithSender::new(blob, complete.public_key, hash); - + let relevant_tx = BlobWithSender::new( + blob, + complete.public_key, + hash, + Some(wtxid.to_byte_array()), + ); relevant_txs.push(relevant_tx); } } @@ -1025,13 +1033,19 @@ impl DaService for BitcoinService { aggregate.body, aggregate.public_key, hash, + 
Some(wtxid.to_byte_array()), ); - relevant_txs.push(relevant_tx); } } - ParsedLightClientTransaction::Chunk(_) => { - // ignore + ParsedLightClientTransaction::Chunk(chunk) => { + let relevant_tx = BlobWithSender::new( + chunk.body, + vec![0], + [0; 32], + Some(wtxid.to_byte_array()), + ); + relevant_txs.push(relevant_tx); } ParsedLightClientTransaction::BatchProverMethodId(method_id) => { if let Some(hash) = method_id.get_sig_verified_hash() { @@ -1039,8 +1053,8 @@ impl DaService for BitcoinService { method_id.body, method_id.public_key, hash, + Some(wtxid.to_byte_array()), ); - relevant_txs.push(relevant_tx); } } @@ -1180,7 +1194,7 @@ pub fn get_relevant_blobs_from_txs( ParsedBatchProofTransaction::SequencerCommitment(seq_comm) => { if let Some(hash) = seq_comm.get_sig_verified_hash() { let relevant_tx = - BlobWithSender::new(seq_comm.body, seq_comm.public_key, hash); + BlobWithSender::new(seq_comm.body, seq_comm.public_key, hash, None); relevant_txs.push(relevant_tx); } @@ -1208,6 +1222,7 @@ impl From for [u8; 32] { fn split_proof(zk_proof: Proof) -> RawLightClientData { let original_blob = borsh::to_vec(&zk_proof).expect("zk::Proof serialize must not fail"); let original_compressed = compress_blob(&original_blob); + if original_compressed.len() < MAX_TXBODY_SIZE { let data = DaDataLightClient::Complete(zk_proof); let blob = borsh::to_vec(&data).expect("zk::Proof serialize must not fail"); @@ -1220,6 +1235,7 @@ fn split_proof(zk_proof: Proof) -> RawLightClientData { let blob = borsh::to_vec(&data).expect("zk::Proof Chunk serialize must not fail"); chunks.push(blob) } + RawLightClientData::Chunks(chunks) } } diff --git a/crates/bitcoin-da/src/spec/blob.rs b/crates/bitcoin-da/src/spec/blob.rs index 3f501ddff..4ed870de8 100644 --- a/crates/bitcoin-da/src/spec/blob.rs +++ b/crates/bitcoin-da/src/spec/blob.rs @@ -13,8 +13,20 @@ pub struct BlobBuf { pub offset: usize, } +// BlobWithSender is a wrapper around BlobBuf to implement BlobReaderTrait +#[derive(Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, Serialize, Deserialize)] +pub struct BlobWithSender { + pub hash: [u8; 32], + + pub sender: AddressWrapper, + + pub blob: CountedBufReader, + + pub wtxid: Option<[u8; 32]>, +} + impl BlobWithSender { - pub fn new(blob: Vec, sender: Vec, hash: [u8; 32]) -> Self { + pub fn new(blob: Vec, sender: Vec, hash: [u8; 32], wtxid: Option<[u8; 32]>) -> Self { Self { blob: CountedBufReader::new(BlobBuf { data: blob, @@ -22,6 +34,7 @@ impl BlobWithSender { }), sender: AddressWrapper(sender), hash, + wtxid, } } } @@ -40,16 +53,6 @@ impl Buf for BlobBuf { } } -// BlobWithSender is a wrapper around BlobBuf to implement BlobReaderTrait -#[derive(Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, Serialize, Deserialize)] -pub struct BlobWithSender { - pub hash: [u8; 32], - - pub sender: AddressWrapper, - - pub blob: CountedBufReader, -} - impl BlobReaderTrait for BlobWithSender { type Address = AddressWrapper; @@ -61,6 +64,10 @@ impl BlobReaderTrait for BlobWithSender { self.hash } + fn wtxid(&self) -> Option<[u8; 32]> { + self.wtxid + } + fn verified_data(&self) -> &[u8] { self.blob.accumulator() } @@ -74,4 +81,21 @@ impl BlobReaderTrait for BlobWithSender { self.blob.advance(num_bytes); self.verified_data() } + + fn serialize_v1(&self) -> borsh::io::Result> { + let v1 = BlobWithSenderV1 { + hash: self.hash, + sender: self.sender.clone(), + blob: &self.blob, + }; + borsh::to_vec(&v1) + } +} + +#[derive(BorshSerialize)] +/// Internal type to ease serialization process +struct 
BlobWithSenderV1<'a> { + hash: [u8; 32], + sender: AddressWrapper, + blob: &'a CountedBufReader, } diff --git a/crates/bitcoin-da/src/verifier.rs b/crates/bitcoin-da/src/verifier.rs index a0061226a..577e86ee1 100644 --- a/crates/bitcoin-da/src/verifier.rs +++ b/crates/bitcoin-da/src/verifier.rs @@ -57,6 +57,7 @@ pub enum ValidationError { InvalidTargetHash, InvalidTimestamp, HeaderInclusionTxCountMismatch, + FailedToDeserializeCompleteChunks, } impl DaVerifier for BitcoinVerifier { @@ -71,6 +72,11 @@ impl DaVerifier for BitcoinVerifier { } } + fn decompress_chunks(&self, complete_chunks: &[u8]) -> Result, Self::Error> { + borsh::from_slice(decompress_blob(complete_chunks).as_slice()) + .map_err(|_| ValidationError::FailedToDeserializeCompleteChunks) + } + // Verify that the given list of blob transactions is complete and correct. fn verify_transactions( &self, @@ -144,8 +150,11 @@ impl DaVerifier for BitcoinVerifier { } } } - ParsedLightClientTransaction::Chunk(_chunk) => { - // ignore + ParsedLightClientTransaction::Chunk(chunk) => { + let blob_content = verified_blob_chunk(&mut blobs_iter, wtxid)?; + if blob_content != chunk.body { + return Err(ValidationError::BlobContentWasModified); + } } ParsedLightClientTransaction::BatchProverMethodId(method_id) => { if let Some(blob_content) = @@ -525,6 +534,26 @@ impl BitcoinVerifier { } } +fn verified_blob_chunk<'a, I>( + blobs_iter: &mut I, + wtxid: &[u8; 32], +) -> Result<&'a [u8], ValidationError> +where + I: Iterator, +{ + let blob = blobs_iter.next(); + + let Some(blob) = blob else { + return Err(ValidationError::ValidBlobNotFoundInBlobs); + }; + + if blob.wtxid != Some(*wtxid) { + return Err(ValidationError::BlobWasTamperedWith); + } + + return Ok(blob.verified_data()); +} + // Get associated blob content only if signatures, hashes and public keys match fn verified_blob_content<'a, T, I>( tx: &T, diff --git a/crates/bitcoin-da/tests/service.rs b/crates/bitcoin-da/tests/service.rs index f0c31072c..ffe3bec3b 100644 --- a/crates/bitcoin-da/tests/service.rs +++ b/crates/bitcoin-da/tests/service.rs @@ -109,12 +109,12 @@ impl TestCase for BitcoinServiceTest { service.extract_relevant_blobs_with_proof(&block, DaNamespace::ToLightClientProver); assert_eq!(inclusion_proof.wtxids.len(), 33); assert_eq!(inclusion_proof.wtxids[1..], block_wtxids[1..]); - // 2 complete, 2 aggregate proofs, and 2 method id txs - assert_eq!(txs.len(), 6); + // 2 complete, 2 aggregate proofs with 2 chunks for the first and 3 chunks for the second agg, and 2 method id txs + assert_eq!(txs.len(), 11); // it is >= due to the probability that one of commit transactions ended up // with the prefix by chance (reveals are guaranteed to have a certain prefix) assert!( - completeness_proof.len() >= 6, + completeness_proof.len() >= 11, "expected completeness proof to have at least 6 txs, it has {}", completeness_proof.len() ); diff --git a/crates/bitcoin-da/tests/test_utils.rs b/crates/bitcoin-da/tests/test_utils.rs index 075224979..bd25922b8 100644 --- a/crates/bitcoin-da/tests/test_utils.rs +++ b/crates/bitcoin-da/tests/test_utils.rs @@ -444,7 +444,7 @@ pub fn get_blob_with_sender(tx: &Transaction, ty: MockData) -> anyhow::Result { DaTxsCouldntBeVerified(DaV::Error), @@ -74,7 +79,22 @@ pub fn run_circuit( .map_err(|err| LightClientVerificationError::DaTxsCouldntBeVerified(err))?; // Mapping from initial state root to final state root and last L2 height - let mut initial_to_final = std::collections::BTreeMap::<[u8; 32], ([u8; 32], u64)>::new(); + let mut initial_to_final = 
BTreeMap::<[u8; 32], ([u8; 32], u64)>::new(); + + let (mut last_state_root, mut last_l2_height, mut mmr_guest) = + previous_light_client_proof_output.as_ref().map_or_else( + || { + // if no previous proof, we start from genesis state root + (l2_genesis_root, 0, MMRGuest::new()) + }, + |prev_journal| { + ( + prev_journal.state_root, + prev_journal.last_l2_height, + prev_journal.mmr_guest.clone(), + ) + }, + ); // If we have a previous light client proof, check they can be chained // If not, skip for now @@ -91,115 +111,147 @@ pub fn run_circuit( } } - let (mut last_state_root, mut last_l2_height) = - previous_light_client_proof_output.as_ref().map_or_else( - || { - // if no previous proof, we start from genesis state root - (l2_genesis_root, 0) - }, - |prev_journal| (prev_journal.state_root, prev_journal.last_l2_height), - ); + let mut in_memory_chunks: BTreeMap> = Default::default(); + let mut mmr_hints = input.mmr_hints; // index only incremented on processing of a complete or aggregate DA tx let mut current_proof_index = 0u32; let mut expected_to_fail_hints = input.expected_to_fail_hint.into_iter().peekable(); // Parse the batch proof da data - for blob in input.da_data { - if blob.sender().as_ref() == batch_prover_da_public_key { - let data = DaDataLightClient::try_from_slice(blob.verified_data()); - - if let Ok(data) = data { - match data { - DaDataLightClient::Complete(proof) => { - let Ok(journal) = G::extract_raw_output(&proof) else { - // cannot parse the output, skip - continue; - }; - - let ( - batch_proof_output_initial_state_root, - batch_proof_output_final_state_root, - batch_proof_output_last_l2_height, - ) = if let Ok(output) = G::deserialize_output::< - BatchProofCircuitOutput, - >(&journal) - { - ( - output.initial_state_root, - output.final_state_root, - output.last_l2_height, - ) - } else if let Ok(output) = G::deserialize_output::< - OldBatchProofCircuitOutput, - >(&journal) - { - (output.initial_state_root, output.final_state_root, 0) - } else { - continue; // cannot parse the output, skip - }; - - // Do not add if last l2 height is smaller or equal to previous output - // This is to defend against replay attacks, for example if somehow there is the script of batch proof 1 we do not need to go through it again - if batch_proof_output_last_l2_height <= last_l2_height - && last_l2_height != 0 - { - current_proof_index += 1; - continue; - } + 'blob_loop: for blob in input.da_data { + let Ok(data) = DaDataLightClient::try_from_slice(blob.verified_data()) else { + println!("Unparseable blob in da_data, wtxid={:?}", blob.wtxid()); + continue; + }; - let batch_proof_method_id = if batch_proof_method_ids.len() == 1 { - // Check if last l2 height is greater than or equal to the only batch proof method id activation height - batch_proof_method_ids[0].1 - } else { - let idx = match batch_proof_method_ids - // Returns err and the index to be inserted, which is the index of the first element greater than the key - // That is why we need to subtract 1 to get the last element smaller than the key - .binary_search_by_key( - &batch_proof_output_last_l2_height, - |(height, _)| *height, - ) { - Ok(idx) => idx, - Err(idx) => idx.saturating_sub(1), - }; - batch_proof_method_ids[idx].1 - }; - - if expected_to_fail_hints - .next_if(|&x| x == current_proof_index) - .is_some() - { - // if index is in the expected to fail hints, then it should fail - G::verify_expected_to_fail(&proof, &batch_proof_method_id.into()) - .expect_err("Proof hinted to fail passed"); + match data { + // No need to 
check sender for chunk + DaDataLightClient::Chunk(chunk) => { + println!("Found chunk"); + in_memory_chunks.insert(blob.wtxid().expect("Chunk should have a wtxid"), chunk); + } + DaDataLightClient::Complete(proof) => { + println!("Found complete proof"); + if blob.sender().as_ref() != batch_prover_da_public_key { + println!( + "Complete proof sender is not batch prover, wtxid={:?}", + blob.wtxid() + ); + continue; + } + + let expected_to_fail = expected_to_fail_hints + .next_if(|&x| x == current_proof_index) + .is_some(); + println!("Complete proof expected to fail: {}", expected_to_fail); + match process_complete_proof::( + &proof, + &batch_proof_method_ids, + last_l2_height, + &mut initial_to_final, + expected_to_fail, + ) { + Ok(()) => current_proof_index += 1, + Err(e) => println!("Error processing complete proof: {e}"), + } + } + DaDataLightClient::Aggregate(_, wtxids) => { + println!("Found aggregate proof"); + if blob.sender().as_ref() != batch_prover_da_public_key { + println!( + "Aggregate proof sender is not batch prover, wtxid={:?}", + blob.wtxid() + ); + continue; + } + + // Ensure that aggregate has all the needed chunks. + // We can recreate iterator here on every aggregate, because when recreating + // the complete proof, we pop the used hints from the mmr_hints. + let mut mmr_hints_iter = mmr_hints.iter(); + let mut in_memory_chunk_count = 0; + for wtxid in &wtxids { + if in_memory_chunks.contains_key(wtxid) { + in_memory_chunk_count += 1; + continue; + } + + let hint = mmr_hints_iter.next(); + if hint.is_none() || hint.unwrap().0.wtxid != *wtxid { + println!("Missing mmr hint, unprovable aggregate {:?}", blob.wtxid()); + continue 'blob_loop; + } + } + + println!( + "Aggregate has all needed chunks, {} from current block, {} from previous blocks", + in_memory_chunk_count, + wtxids.len() - in_memory_chunk_count, + ); + + let mut complete_proof = vec![]; + // Used for re-adding chunks back in case of failure + let mut used_chunk_ptrs = Vec::with_capacity(in_memory_chunk_count); + for wtxid in wtxids { + if let Some(chunk) = in_memory_chunks.remove(&wtxid) { + used_chunk_ptrs.push((complete_proof.len(), chunk.len(), wtxid)); + complete_proof.extend(chunk); + } else { + let (chunk, proof) = mmr_hints.pop_front().expect("Already checked"); + + if mmr_guest.verify_proof(&chunk, &proof) { + complete_proof.extend(chunk.body); } else { - // if index is not in the expected to fail hints, then it should pass - G::verify(&journal, &batch_proof_method_id.into()) - .expect("Proof hinted to pass failed"); - recursive_match_state_roots( - &mut initial_to_final, - &BatchProofInfo::new( - batch_proof_output_initial_state_root, - batch_proof_output_final_state_root, - batch_proof_output_last_l2_height, - ), - ); + panic!("Failed to verify MMR proof for hint"); } + } + } + + let reinsert_used_chunks = || { + for (idx, size, wtxid) in used_chunk_ptrs { + let chunk = complete_proof[idx..idx + size].to_vec(); + in_memory_chunks.insert(wtxid, chunk); + } + }; + + let Ok(complete_proof) = da_verifier.decompress_chunks(&complete_proof) else { + println!("Failed to decompress and deserialize completed chunks"); + reinsert_used_chunks(); + continue; + }; - current_proof_index += 1; + let expected_to_fail = expected_to_fail_hints + .next_if(|&x| x == current_proof_index) + .is_some(); + println!("Aggregate proof expected to fail: {}", expected_to_fail); + match process_complete_proof::( + &complete_proof, + &batch_proof_method_ids, + last_l2_height, + &mut initial_to_final, + expected_to_fail, + ) { + 
Ok(()) => current_proof_index += 1, + // serialization or duplicate proof error + Err(e) => { + reinsert_used_chunks(); + println!("Error processing aggregated proof: {e}"); } - DaDataLightClient::Aggregate(_) => todo!(), - DaDataLightClient::Chunk(_) => todo!(), - DaDataLightClient::BatchProofMethodId(_) => {} // if coming from batch prover, ignore } } - } else if blob.sender().as_ref() == method_id_upgrade_authority_da_public_key { - let data = DaDataLightClient::try_from_slice(blob.verified_data()); - - if let Ok(DaDataLightClient::BatchProofMethodId(BatchProofMethodId { + DaDataLightClient::BatchProofMethodId(BatchProofMethodId { method_id, activation_l2_height, - })) = data - { + }) => { + println!("Found batch proof method id"); + if blob.sender().as_ref() != method_id_upgrade_authority_da_public_key { + println!( + "Batch proof method id sender is not upgrade authority, wtxid={:?}", + blob.wtxid() + ); + continue; + } + let last_activation_height = batch_proof_method_ids .last() .expect("Should be at least one") @@ -227,6 +279,13 @@ pub fn run_circuit( // Collect unchained outputs let unchained_outputs = collect_unchained_outputs(&initial_to_final, last_l2_height); + if !in_memory_chunks.is_empty() { + println!("Adding {} more chunks to mmr", in_memory_chunks.len()); + for (wtxid, chunk) in in_memory_chunks { + mmr_guest.append(MMRChunk::new(wtxid, chunk)); + } + } + Ok(LightClientCircuitOutput { state_root: last_state_root, light_client_proof_method_id: input.light_client_proof_method_id, @@ -234,5 +293,79 @@ pub fn run_circuit( unchained_batch_proofs_info: unchained_outputs, last_l2_height, batch_proof_method_ids, + mmr_guest, }) } + +fn process_complete_proof( + proof: &[u8], + batch_proof_method_ids: &InitialBatchProofMethodIds, + last_l2_height: u64, + initial_to_final: &mut std::collections::BTreeMap<[u8; 32], ([u8; 32], u64)>, + expected_to_fail: bool, +) -> Result<(), CircuitError> { + let Ok(journal) = G::extract_raw_output(proof) else { + return Err("Failed to extract output from proof"); + }; + + let ( + batch_proof_output_initial_state_root, + batch_proof_output_final_state_root, + batch_proof_output_last_l2_height, + ) = if let Ok(output) = + G::deserialize_output::>(&journal) + { + ( + output.initial_state_root, + output.final_state_root, + output.last_l2_height, + ) + } else if let Ok(output) = + G::deserialize_output::>(&journal) + { + (output.initial_state_root, output.final_state_root, 0) + } else { + return Err("Failed to parse proof"); + }; + + // Do not add if last l2 height is smaller or equal to previous output + // This is to defend against replay attacks, for example if somehow there is the script of batch proof 1 we do not need to go through it again + if batch_proof_output_last_l2_height <= last_l2_height && last_l2_height != 0 { + return Err("Last L2 height is less than proof's last l2 height"); + } + + let batch_proof_method_id = if batch_proof_method_ids.len() == 1 { + batch_proof_method_ids[0].1 + } else { + let idx = match batch_proof_method_ids + // Returns err and the index to be inserted, which is the index of the first element greater than the key + // That is why we need to subtract 1 to get the last element smaller than the key + .binary_search_by_key(&batch_proof_output_last_l2_height, |(height, _)| *height) + { + Ok(idx) => idx, + Err(idx) => idx.saturating_sub(1), + }; + batch_proof_method_ids[idx].1 + }; + + println!("Using batch proof method id {:?}", batch_proof_method_id); + + if expected_to_fail { + // if index is in the expected to fail 
hints, then it should fail + G::verify_expected_to_fail(proof, &batch_proof_method_id.into()) + .expect_err("Proof hinted to fail passed"); + } else { + // if index is not in the expected to fail hints, then it should pass + G::verify(&journal, &batch_proof_method_id.into()).expect("Proof hinted to pass failed"); + recursive_match_state_roots( + initial_to_final, + &BatchProofInfo::new( + batch_proof_output_initial_state_root, + batch_proof_output_final_state_root, + batch_proof_output_last_l2_height, + ), + ); + } + + Ok(()) +} diff --git a/crates/light-client-prover/src/da_block_handler.rs b/crates/light-client-prover/src/da_block_handler.rs index 60edffbb6..d02c785e9 100644 --- a/crates/light-client-prover/src/da_block_handler.rs +++ b/crates/light-client-prover/src/da_block_handler.rs @@ -1,4 +1,4 @@ -use std::collections::{HashMap, VecDeque}; +use std::collections::{BTreeMap, HashMap, VecDeque}; use std::sync::Arc; use borsh::BorshDeserialize; @@ -7,9 +7,11 @@ use citrea_common::da::get_da_block_at_height; use citrea_common::LightClientProverConfig; use citrea_primitives::forks::fork_from_block_number; use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps}; +use sov_db::mmr_db::MmrDB; use sov_db::schema::types::{SlotNumber, StoredLightClientProofOutput}; use sov_modules_api::{BatchProofCircuitOutput, BlobReaderTrait, DaSpec, Zkvm}; use sov_rollup_interface::da::{BlockHeaderTrait, DaDataLightClient, DaNamespace}; +use sov_rollup_interface::mmr::{MMRChunk, MMRNative, Wtxid}; use sov_rollup_interface::services::da::{DaService, SlotData}; use sov_rollup_interface::spec::SpecId; use sov_rollup_interface::zk::{ @@ -20,7 +22,7 @@ use tokio::select; use tokio::sync::{mpsc, Mutex}; use tokio::time::{sleep, Duration}; use tokio_util::sync::CancellationToken; -use tracing::{error, info}; +use tracing::{error, info, warn}; use crate::metrics::LIGHT_CLIENT_METRICS; @@ -41,6 +43,7 @@ where light_client_proof_elfs: HashMap>, l1_block_cache: Arc>>, queued_l1_blocks: VecDeque<::FilteredBlock>, + mmr_native: MMRNative, } impl L1BlockHandler @@ -60,7 +63,9 @@ where batch_proof_code_commitments: HashMap, light_client_proof_code_commitments: HashMap, light_client_proof_elfs: HashMap>, + mmr_db: MmrDB, ) -> Self { + let mmr_native = MMRNative::new(mmr_db); Self { _prover_config: prover_config, prover_service, @@ -72,6 +77,7 @@ where light_client_proof_elfs, l1_block_cache: Arc::new(Mutex::new(L1BlockCache::new())), queued_l1_blocks: VecDeque::new(), + mmr_native, } } @@ -122,7 +128,8 @@ where let l1_block = self .queued_l1_blocks .front() - .expect("Pending l1 blocks cannot be empty"); + .expect("Pending l1 blocks cannot be empty") + .clone(); self.process_l1_block(l1_block).await?; @@ -132,7 +139,7 @@ where Ok(()) } - async fn process_l1_block(&self, l1_block: &Da::FilteredBlock) -> anyhow::Result<()> { + async fn process_l1_block(&mut self, l1_block: Da::FilteredBlock) -> anyhow::Result<()> { let l1_hash = l1_block.header().hash().into(); let l1_height = l1_block.header().height(); @@ -143,7 +150,7 @@ where let (mut da_data, inclusion_proof, completeness_proof) = self .da_service - .extract_relevant_blobs_with_proof(l1_block, DaNamespace::ToLightClientProver); + .extract_relevant_blobs_with_proof(&l1_block, DaNamespace::ToLightClientProver); // Even though following extract_batch_proofs call does full_data on batch proofs, // we also need to do it for BatchProofMethodId txs @@ -186,58 +193,131 @@ where batch_proofs.len() ); + let mut unused_chunks = BTreeMap::>::new(); + let mut mmr_hints = 
vec![]; // index only incremented for complete and aggregated proofs, in line with the circuit let mut proof_index = 0u32; let mut expected_to_fail_hint = vec![]; - for batch_proof in batch_proofs { - // TODO handle aggreagates - if let DaDataLightClient::Complete(proof) = batch_proof { - let batch_proof_last_l2_height = match Vm::extract_output::< - BatchProofCircuitOutput<::Spec, [u8; 32]>, - >(&proof) - { - Ok(output) => output.last_l2_height, - Err(e) => { - info!("Failed to extract post fork 1 output from proof: {:?}. Trying to extract pre fork 1 output", e); - if Vm::extract_output::< - OldBatchProofCircuitOutput<::Spec, [u8; 32]>, - >(&proof) - .is_err() - { - tracing::info!( - "Failed to extract pre fork1 and fork1 output from proof" - ); + 'proof_loop: for (wtxid, batch_proof) in batch_proofs { + info!("Batch proof wtxid={}", hex::encode(wtxid)); + match batch_proof { + DaDataLightClient::Complete(proof) => { + info!("It is complete proof"); + match self.verify_complete_proof(&proof, l2_last_height) { + Ok(true) => { + info!("Complete proof verified successfully"); + assumptions.push(proof); + proof_index += 1; + } + Ok(false) => { + warn!("Complete proof is expected to fail"); + expected_to_fail_hint.push(proof_index); + proof_index += 1; + } + Err(err) => { + error!("Batch proof verification failed: {err}"); + } + } + } + DaDataLightClient::Aggregate(_txids, wtxids) => { + info!("It is aggregate proof with {} chunks", wtxids.len()); + // Ensure that aggregate has all the needed chunks + let mut used_chunk_count = 0; + for wid in &wtxids { + if unused_chunks.contains_key(wid) { + used_chunk_count += 1; continue; } - // If this is a pre fork 1 proof, then we need to convert it to post fork 1 proof - 0 + if !self.mmr_native.contains(*wid)? { + warn!("Aggregate is unprovable due to missing chunks"); + continue 'proof_loop; + } } - }; - if batch_proof_last_l2_height <= l2_last_height && l2_last_height != 0 { - proof_index += 1; - continue; - } + info!( + "Aggregate has all needed chunks, {} from current block, {} from previous blocks", + used_chunk_count, + wtxids.len() - used_chunk_count, + ); - let current_spec = fork_from_block_number(batch_proof_last_l2_height).spec_id; - let batch_proof_method_id = self - .batch_proof_code_commitments - .get(¤t_spec) - .expect("Batch proof code commitment not found"); - if let Err(e) = Vm::verify(proof.as_slice(), batch_proof_method_id) { - tracing::error!("Failed to verify batch proof: {:?}", e); - expected_to_fail_hint.push(proof_index); - } else { - assumptions.push(proof); - } + // Recollect the complete proof from chunks + let mut complete_proof = vec![]; + // Used for re-adding chunks back in case of failure + let mut used_chunk_ptrs = Vec::with_capacity(used_chunk_count); + for wtxid in wtxids { + if let Some(chunk) = unused_chunks.remove(&wtxid) { + used_chunk_ptrs.push((complete_proof.len(), chunk.len(), wtxid)); + complete_proof.extend(chunk); + } else { + let (chunk, proof) = self + .mmr_native + .generate_proof(wtxid)? 
+ .expect("Chunk wtxid must exist"); + complete_proof.extend_from_slice(&chunk.body); + mmr_hints.push((chunk, proof)); + } + } - proof_index += 1; + info!("Aggregate proof reassembled from chunks"); + + let reinsert_used_chunks = || { + for (idx, size, wtxid) in used_chunk_ptrs { + let chunk = complete_proof[idx..idx + size].to_vec(); + unused_chunks.insert(wtxid, chunk); + } + }; + + let Ok(complete_proof) = self.da_service.decompress_chunks(&complete_proof) + else { + error!( + "Failed to decompress complete chunks of aggregate {}", + hex::encode(wtxid) + ); + reinsert_used_chunks(); + continue; + }; + + match self.verify_complete_proof(&complete_proof, l2_last_height) { + Ok(true) => { + info!("Aggregate proof verified successfully"); + assumptions.push(complete_proof); + proof_index += 1; + } + Ok(false) => { + warn!("Aggregate proof is expected to fail"); + expected_to_fail_hint.push(proof_index); + proof_index += 1; + } + Err(err) => { + error!("Invalid aggregate batch proof found: {err}"); + reinsert_used_chunks(); + } + } + } + DaDataLightClient::Chunk(body) => { + info!("It is chunk proof"); + // For now, this chunk is unused by any aggregate in the block. + unused_chunks.insert(wtxid, body); + } + _ => { + continue; + } } } tracing::debug!("assumptions len: {:?}", assumptions.len()); + // Add unused chunks to MMR native. + // Up until this point, the proof has been generated by aggregates in the block, + // so it's okay to update the MMR tree now. + if !unused_chunks.is_empty() { + info!("Adding {} more chunks to mmr", unused_chunks.len()); + for (wtxid, body) in unused_chunks.into_iter() { + self.mmr_native.append(MMRChunk::new(wtxid, body))?; + } + } + let current_fork = fork_from_block_number(l2_last_height); let light_client_proof_code_commitment = self .light_client_proof_code_commitments @@ -256,6 +336,7 @@ where da_block_header: l1_block.header().clone(), light_client_proof_method_id: light_client_proof_code_commitment.clone().into(), previous_light_client_proof_journal: light_client_proof_journal, + mmr_hints: mmr_hints.into(), expected_to_fail_hint, }; @@ -288,28 +369,80 @@ where Ok(()) } + /// Verifies complete proof. Returns: + /// + /// - Ok(true) -> proof is successfully parsed, not a duplicate, and verified + /// - Ok(false) -> proof is successfully parsed, not a duplicate, but verification failed + /// - Err(_) -> proof is either unparseable or a duplicate + fn verify_complete_proof( + &self, + proof: &Vec, + light_client_l2_height: u64, + ) -> anyhow::Result { + let batch_proof_last_l2_height = match Vm::extract_output::< + BatchProofCircuitOutput<::Spec, [u8; 32]>, + >(proof) + { + Ok(output) => output.last_l2_height, + Err(e) => { + warn!("Failed to extract post fork 1 output from proof: {:?}. 
Trying to extract pre fork 1 output", e); + if Vm::extract_output::< + OldBatchProofCircuitOutput<::Spec, [u8; 32]>, + >(proof) + .is_err() + { + return Err(anyhow::anyhow!("Failed to extract both pre-fork1 and fork1 output from proof")); + } + 0 + } + }; + + if batch_proof_last_l2_height <= light_client_l2_height && light_client_l2_height != 0 { + return Err(anyhow::anyhow!( + "Batch proof l2 height is less than latest light client proof l2 height" + )); + } + + let current_spec = fork_from_block_number(batch_proof_last_l2_height).spec_id; + let batch_proof_method_id = self + .batch_proof_code_commitments + .get(¤t_spec) + .expect("Batch proof code commitment not found"); + + if let Err(e) = Vm::verify(proof.as_slice(), batch_proof_method_id) { + warn!("Failed to verify batch proof: {:?}", e); + Ok(false) + } else { + Ok(true) + } + } + async fn extract_batch_proofs( &self, da_data: &mut [<::Spec as DaSpec>::BlobTransaction], da_slot_hash: [u8; 32], // passing this as an argument is not clever - ) -> Vec { + ) -> Vec<(Wtxid, DaDataLightClient)> { let mut batch_proofs = Vec::new(); da_data.iter_mut().for_each(|tx| { - // Check for commitment - if tx.sender().as_ref() == self.batch_prover_da_pub_key.as_slice() { - let data = DaDataLightClient::try_from_slice(tx.full_data()); - - if let Ok(proof) = data { - batch_proofs.push(proof); - } else { - tracing::warn!( - "Found broken DA data in block 0x{}: {:?}", - hex::encode(da_slot_hash), - data - ); + if let Ok(data) = DaDataLightClient::try_from_slice(tx.full_data()) { + match data { + DaDataLightClient::Chunk(_) => { + batch_proofs.push((tx.wtxid().expect("Blob should have wtxid"), data)) + } + _ => { + if tx.sender().as_ref() == self.batch_prover_da_pub_key.as_slice() { + batch_proofs.push((tx.wtxid().expect("Blob should have wtxid"), data)); + } + } } + } else { + tracing::warn!( + "Found broken DA data in block 0x{}", + hex::encode(da_slot_hash) + ); } + // Check for commitment }); batch_proofs } diff --git a/crates/light-client-prover/src/runner.rs b/crates/light-client-prover/src/runner.rs index a6040f688..1dcec51ff 100644 --- a/crates/light-client-prover/src/runner.rs +++ b/crates/light-client-prover/src/runner.rs @@ -7,6 +7,7 @@ use citrea_common::{LightClientProverConfig, RollupPublicKeys, RpcConfig, Runner use jsonrpsee::server::{BatchRequestConfig, ServerBuilder}; use jsonrpsee::RpcModule; use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps}; +use sov_db::mmr_db::MmrDB; use sov_db::schema::types::SlotNumber; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::spec::SpecId; @@ -37,6 +38,7 @@ where batch_proof_commitments_by_spec: HashMap, light_client_proof_commitment: HashMap, light_client_proof_elfs: HashMap>, + mmr_db: MmrDB, } impl CitreaLightClientProver @@ -58,6 +60,7 @@ where batch_proof_commitments_by_spec: HashMap, light_client_proof_commitment: HashMap, light_client_proof_elfs: HashMap>, + mmr_db: MmrDB, task_manager: TaskManager<()>, ) -> Result { Ok(Self { @@ -72,6 +75,7 @@ where batch_proof_commitments_by_spec, light_client_proof_commitment, light_client_proof_elfs, + mmr_db, }) } @@ -150,6 +154,7 @@ where let prover_config = self.prover_config.clone(); let prover_service = self.prover_service.clone(); let ledger_db = self.ledger_db.clone(); + let mmr_db = self.mmr_db.clone(); let da_service = self.da_service.clone(); let batch_prover_da_pub_key = self.public_keys.prover_da_pub_key.clone(); let batch_proof_commitments_by_spec = self.batch_proof_commitments_by_spec.clone(); @@ 
-166,6 +171,7 @@ where batch_proof_commitments_by_spec, light_client_proof_commitment, light_client_proof_elfs, + mmr_db, ); l1_block_handler .run(last_l1_height_scanned.0, cancellation_token) diff --git a/crates/light-client-prover/src/tests/mod.rs b/crates/light-client-prover/src/tests/mod.rs index a4876944a..b8c85cf01 100644 --- a/crates/light-client-prover/src/tests/mod.rs +++ b/crates/light-client-prover/src/tests/mod.rs @@ -1,10 +1,17 @@ -mod test_utils; +pub mod test_utils; -use sov_mock_da::{MockBlockHeader, MockDaVerifier}; +use std::collections::VecDeque; + +use sov_mock_da::{MockAddress, MockBlob, MockBlockHeader, MockDaVerifier}; use sov_mock_zkvm::MockZkGuest; -use sov_rollup_interface::zk::LightClientCircuitInput; +use sov_rollup_interface::da::{BlobReaderTrait, DaDataLightClient, LatestDaState}; +use sov_rollup_interface::mmr::{InMemoryStore, MMRChunk, MMRGuest, MMRNative, MMRNodeHash}; +use sov_rollup_interface::zk::{LightClientCircuitInput, LightClientCircuitOutput}; use sov_rollup_interface::Network; -use test_utils::{create_mock_batch_proof, create_new_method_id_tx, create_prev_lcp_serialized}; +use test_utils::{ + create_mmr_hints, create_mock_batch_proof, create_new_method_id_tx, create_prev_lcp_serialized, + create_random_state_diff, create_serialized_mock_proof, +}; use crate::circuit::{run_circuit, LightClientVerificationError}; @@ -28,6 +35,7 @@ fn test_light_client_circuit_valid_da_valid_data() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -66,6 +74,7 @@ fn test_light_client_circuit_valid_da_valid_data() { light_client_proof_method_id, inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -103,6 +112,7 @@ fn test_wrong_order_da_blocks_should_still_work() { da_data: vec![blob_2, blob_1], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -144,6 +154,7 @@ fn create_unchainable_outputs_then_chain_them_on_next_block() { da_data: vec![blob_2, blob_1], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -193,6 +204,7 @@ fn create_unchainable_outputs_then_chain_them_on_next_block() { da_data: vec![blob_1], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -231,6 +243,7 @@ fn test_header_chain_proof_height_and_hash() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -269,6 +282,7 @@ fn test_header_chain_proof_height_and_hash() { light_client_proof_method_id, inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -307,6 +321,7 @@ fn test_unverifiable_batch_proofs() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![1], }; @@ -350,6 +365,7 @@ fn test_unverifiable_prev_light_client_proof() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![1], }; @@ -386,6 +402,7 @@ fn test_unverifiable_prev_light_client_proof() { light_client_proof_method_id, inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), 
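// (note on the new field) mmr_hints carries (chunk, MMR inclusion proof) pairs for chunks posted in earlier DA blocks; it stays at Default::default() whenever every chunk an aggregate references is already present in this block's da_data.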
expected_to_fail_hint: vec![], }; @@ -425,6 +442,7 @@ fn test_new_method_id_txs() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -457,6 +475,7 @@ fn test_new_method_id_txs() { da_data: vec![blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -491,6 +510,7 @@ fn test_new_method_id_txs() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -535,6 +555,7 @@ fn test_expect_to_fail_on_correct_proof() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![1], }; @@ -572,6 +593,7 @@ fn test_expected_to_fail_proof_not_hinted() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -586,3 +608,999 @@ fn test_expected_to_fail_proof_not_hinted() { ) .unwrap(); } + +#[test] +fn test_light_client_circuit_verify_chunks() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + + let state_diff = create_random_state_diff(100); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + ); + + let chunk1 = serialized_mock_proof[0..39700].to_vec(); + let chunk1_da_data = DaDataLightClient::Chunk(chunk1.clone()); + let chunk1_serialized = borsh::to_vec(&chunk1_da_data).expect("should serialize"); + + let mut blob1 = MockBlob::new( + chunk1_serialized.clone(), + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([1; 32]), + ); + blob1.full_data(); + + let chunk2 = serialized_mock_proof[39700..39700 * 2].to_vec(); + let chunk2_da_data = DaDataLightClient::Chunk(chunk2.clone()); + let chunk2_serialized = borsh::to_vec(&chunk2_da_data).expect("should serialize"); + + let mut blob2 = MockBlob::new( + chunk2_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([2; 32]), + ); + + blob2.full_data(); + + let chunk3 = serialized_mock_proof[39700 * 2..].to_vec(); + let chunk3_da_data = DaDataLightClient::Chunk(chunk3.clone()); + let chunk3_serialized = borsh::to_vec(&chunk3_da_data).expect("should serialize"); + + let mut blob3 = MockBlob::new( + chunk3_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([3; 32]), + ); + blob3.full_data(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![ + blob1.wtxid().unwrap(), + blob2.wtxid().unwrap(), + blob3.wtxid().unwrap(), + ], + vec![ + blob1.wtxid().unwrap(), + blob2.wtxid().unwrap(), + blob3.wtxid().unwrap(), + ], + ); + + let aggregate_serialized = borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_1 = MockBlockHeader::from_height(1); + let input = LightClientCircuitInput { + previous_light_client_proof_journal: None, + light_client_proof_method_id, + da_block_header: block_header_1, + da_data: vec![blob1, blob2, blob3, blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints: Default::default(), + expected_to_fail_hint: vec![], + }; + 
+ let output = run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); + + assert_eq!(output.state_root, [2; 32]); +} + +#[test] +fn test_missing_chunk() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + + let state_diff = create_random_state_diff(100); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + ); + + let chunk1 = serialized_mock_proof[0..39700].to_vec(); + let chunk1_da_data = DaDataLightClient::Chunk(chunk1.clone()); + let chunk1_serialized = borsh::to_vec(&chunk1_da_data).expect("should serialize"); + + let mut blob1 = MockBlob::new( + chunk1_serialized.clone(), + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([1; 32]), + ); + blob1.full_data(); + + let chunk2 = serialized_mock_proof[39700..39700 * 2].to_vec(); + let chunk2_da_data = DaDataLightClient::Chunk(chunk2.clone()); + let chunk2_serialized = borsh::to_vec(&chunk2_da_data).expect("should serialize"); + + let mut blob2 = MockBlob::new( + chunk2_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([2; 32]), + ); + + blob2.full_data(); + + let chunk3 = serialized_mock_proof[39700 * 2..].to_vec(); + let chunk3_da_data = DaDataLightClient::Chunk(chunk3.clone()); + let chunk3_serialized = borsh::to_vec(&chunk3_da_data).expect("should serialize"); + + let mut blob3 = MockBlob::new( + chunk3_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([3; 32]), + ); + blob3.full_data(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![ + blob1.wtxid().unwrap(), + blob2.wtxid().unwrap(), + blob3.wtxid().unwrap(), + ], + vec![ + blob1.wtxid().unwrap(), + blob2.wtxid().unwrap(), + blob3.wtxid().unwrap(), + ], + ); + + let aggregate_serialized = borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_1 = MockBlockHeader::from_height(1); + let input = LightClientCircuitInput { + previous_light_client_proof_journal: None, + light_client_proof_method_id, + da_block_header: block_header_1, + // Blob2 is not present + da_data: vec![blob1, blob3, blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints: Default::default(), + expected_to_fail_hint: vec![], + }; + + let output = run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); + + assert_eq!(output.state_root, l2_genesis_state_root); + assert_eq!(output.last_l2_height, 0); +} + +#[test] +fn test_mmr_hints() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + + let state_diff = create_random_state_diff(1); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + ); + + let chunk1 = 
serialized_mock_proof[0..397].to_vec(); + + let chunk2 = serialized_mock_proof[397..397 * 2].to_vec(); + + let chunk3 = serialized_mock_proof[397 * 2..].to_vec(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![[1; 32], [2; 32], [3; 32]], + vec![[1; 32], [2; 32], [3; 32]], + ); + + let aggregate_serialized = borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_1 = MockBlockHeader::from_height(1); + + let mut mmr = MMRNative::new(InMemoryStore::default()); + mmr.append(MMRChunk::new([1; 32], chunk1.clone())).unwrap(); + mmr.append(MMRChunk::new([2; 32], chunk2)).unwrap(); + mmr.append(MMRChunk::new([3; 32], chunk3)).unwrap(); + + let mut mmr_guest = MMRGuest::new(); + + let (mmr_chunk1, mmr_proof1) = mmr + .generate_proof([1; 32]) + .unwrap() + .expect("Chunk wtxid must exist"); + mmr_guest.append(mmr_chunk1.clone()); + + let (mmr_chunk2, mmr_proof2) = mmr + .generate_proof([2; 32]) + .unwrap() + .expect("Chunk wtxid must exist"); + mmr_guest.append(mmr_chunk2.clone()); + + let (mmr_chunk3, mmr_proof3) = mmr + .generate_proof([3; 32]) + .unwrap() + .expect("Chunk wtxid must exist"); + mmr_guest.append(mmr_chunk3.clone()); + + let mut mmr_hints = VecDeque::new(); + mmr_hints.push_back((mmr_chunk1, mmr_proof1)); + mmr_hints.push_back((mmr_chunk2, mmr_proof2)); + mmr_hints.push_back((mmr_chunk3, mmr_proof3)); + + let lcp_out = LightClientCircuitOutput { + state_root: l2_genesis_state_root, + light_client_proof_method_id, + latest_da_state: LatestDaState { + block_hash: block_header_1.prev_hash.0, + ..Default::default() + }, + unchained_batch_proofs_info: vec![], + last_l2_height: 0, + batch_proof_method_ids: vec![(0, [0, 0, 0, 0, 0, 0, 0, 0])], + mmr_guest, + }; + + let prev_lcp_out = create_prev_lcp_serialized(lcp_out, true); + + let input = LightClientCircuitInput { + previous_light_client_proof_journal: Some(prev_lcp_out), + light_client_proof_method_id, + da_block_header: block_header_1, + // Only aggregate is present others are in mmr hints + da_data: vec![blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints, + expected_to_fail_hint: vec![], + }; + + let output = run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); + + assert_eq!(output.state_root, [2; 32]); + assert_eq!(output.last_l2_height, 101); +} + +#[test] +#[should_panic = "Failed to verify MMR proof for hint"] +fn test_malformed_mmr_proof_internal_index() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + + let state_diff = create_random_state_diff(1); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + ); + + let chunk1 = serialized_mock_proof[0..397].to_vec(); + + let chunk2 = serialized_mock_proof[397..397 * 2].to_vec(); + + let chunk3 = serialized_mock_proof[397 * 2..].to_vec(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![[1; 32], [2; 32], [3; 32]], + vec![[1; 32], [2; 32], [3; 32]], + ); + + let aggregate_serialized = 
borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_1 = MockBlockHeader::from_height(1); + + let mut mmr_guest = MMRGuest::new(); + let chunks = vec![ + ([1; 32], chunk1.clone()), + ([2; 32], chunk2.clone()), + ([3; 32], chunk3.clone()), + ]; + + let mut mmr_hints = create_mmr_hints(&mut mmr_guest, chunks); + mmr_hints[0].1.internal_idx = 2; + + // Malform the proofs + let internal_idx_proof1 = mmr_hints[0].1.internal_idx; + mmr_hints[0].1.internal_idx = mmr_hints[1].1.internal_idx; + mmr_hints[1].1.internal_idx = internal_idx_proof1; + + let lcp_out = LightClientCircuitOutput { + state_root: l2_genesis_state_root, + light_client_proof_method_id, + latest_da_state: LatestDaState { + block_hash: block_header_1.prev_hash.0, + ..Default::default() + }, + unchained_batch_proofs_info: vec![], + last_l2_height: 0, + batch_proof_method_ids: vec![(0, [0, 0, 0, 0, 0, 0, 0, 0])], + mmr_guest, + }; + + let prev_lcp_out = create_prev_lcp_serialized(lcp_out, true); + + let input = LightClientCircuitInput { + previous_light_client_proof_journal: Some(prev_lcp_out), + light_client_proof_method_id, + da_block_header: block_header_1, + // Only aggregate is present others are in mmr hints + da_data: vec![blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints, + expected_to_fail_hint: vec![], + }; + + run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); +} + +#[test] +#[should_panic = "Failed to verify MMR proof for hint"] +fn test_malformed_mmr_proof_subroot_index() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + + let state_diff = create_random_state_diff(1); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + ); + + let chunk1 = serialized_mock_proof[0..397].to_vec(); + + let chunk2 = serialized_mock_proof[397..397 * 2].to_vec(); + + let chunk3 = serialized_mock_proof[397 * 2..].to_vec(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![[1; 32], [2; 32], [3; 32]], + vec![[1; 32], [2; 32], [3; 32]], + ); + + let aggregate_serialized = borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_1 = MockBlockHeader::from_height(1); + + let mut mmr_guest = MMRGuest::new(); + let chunks = vec![ + ([1; 32], chunk1.clone()), + ([2; 32], chunk2.clone()), + ([3; 32], chunk3.clone()), + ]; + + let mut mmr_hints = create_mmr_hints(&mut mmr_guest, chunks); + + // Malform the proofs + mmr_hints[0].1.subroot_idx = 2; + + let lcp_out = LightClientCircuitOutput { + state_root: l2_genesis_state_root, + light_client_proof_method_id, + latest_da_state: LatestDaState { + block_hash: block_header_1.prev_hash.0, + ..Default::default() + }, + unchained_batch_proofs_info: vec![], + last_l2_height: 0, + batch_proof_method_ids: vec![(0, [0, 0, 0, 0, 0, 0, 0, 0])], + mmr_guest, + }; + + let prev_lcp_out = 
create_prev_lcp_serialized(lcp_out, true); + + let input = LightClientCircuitInput { + previous_light_client_proof_journal: Some(prev_lcp_out), + light_client_proof_method_id, + da_block_header: block_header_1, + // Only aggregate is present others are in mmr hints + da_data: vec![blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints, + expected_to_fail_hint: vec![], + }; + + run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); +} + +#[test] +#[should_panic = "Failed to verify MMR proof for hint"] +fn test_malformed_mmr_chunk_body() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + + let state_diff = create_random_state_diff(1); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + ); + + let chunk1 = serialized_mock_proof[0..397].to_vec(); + + let chunk2 = serialized_mock_proof[397..397 * 2].to_vec(); + + let chunk3 = serialized_mock_proof[397 * 2..].to_vec(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![[1; 32], [2; 32], [3; 32]], + vec![[1; 32], [2; 32], [3; 32]], + ); + + let aggregate_serialized = borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_1 = MockBlockHeader::from_height(1); + + let mut mmr_guest = MMRGuest::new(); + let chunks = vec![ + ([1; 32], chunk1.clone()), + ([2; 32], chunk2.clone()), + ([3; 32], chunk3.clone()), + ]; + + let mut mmr_hints = create_mmr_hints(&mut mmr_guest, chunks); + + // Malform the chunk body + mmr_hints[0].0.body.extend_from_slice(&[1, 2, 3, 4, 5]); + + let lcp_out = LightClientCircuitOutput { + state_root: l2_genesis_state_root, + light_client_proof_method_id, + latest_da_state: LatestDaState { + block_hash: block_header_1.prev_hash.0, + ..Default::default() + }, + unchained_batch_proofs_info: vec![], + last_l2_height: 0, + batch_proof_method_ids: vec![(0, [0, 0, 0, 0, 0, 0, 0, 0])], + mmr_guest, + }; + + let prev_lcp_out = create_prev_lcp_serialized(lcp_out, true); + + let input = LightClientCircuitInput { + previous_light_client_proof_journal: Some(prev_lcp_out), + light_client_proof_method_id, + da_block_header: block_header_1, + // Only aggregate is present others are in mmr hints + da_data: vec![blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints, + expected_to_fail_hint: vec![], + }; + + run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); +} + +#[test] +fn test_malformed_mmr_chunk_wtxid() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + + let state_diff = create_random_state_diff(1); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + 
); + + let chunk1 = serialized_mock_proof[0..397].to_vec(); + + let chunk2 = serialized_mock_proof[397..397 * 2].to_vec(); + + let chunk3 = serialized_mock_proof[397 * 2..].to_vec(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![[1; 32], [2; 32], [3; 32]], + vec![[1; 32], [2; 32], [3; 32]], + ); + + let aggregate_serialized = borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_1 = MockBlockHeader::from_height(1); + + let mut mmr_guest = MMRGuest::new(); + let chunks = vec![ + ([1; 32], chunk1.clone()), + ([2; 32], chunk2.clone()), + ([3; 32], chunk3.clone()), + ]; + + let mut mmr_hints = create_mmr_hints(&mut mmr_guest, chunks); + + // Malform the chunk wtxid + mmr_hints[0].0.wtxid = [88; 32]; + + let lcp_out = LightClientCircuitOutput { + state_root: l2_genesis_state_root, + light_client_proof_method_id, + latest_da_state: LatestDaState { + block_hash: block_header_1.prev_hash.0, + ..Default::default() + }, + unchained_batch_proofs_info: vec![], + last_l2_height: 0, + batch_proof_method_ids: vec![(0, [0, 0, 0, 0, 0, 0, 0, 0])], + mmr_guest, + }; + + let prev_lcp_out = create_prev_lcp_serialized(lcp_out, true); + + let input = LightClientCircuitInput { + previous_light_client_proof_journal: Some(prev_lcp_out), + light_client_proof_method_id, + da_block_header: block_header_1, + // Only aggregate is present others are in mmr hints + da_data: vec![blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints, + expected_to_fail_hint: vec![], + }; + + let output = run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); + + assert_eq!(output.state_root, l2_genesis_state_root); + assert_eq!(output.last_l2_height, 0); + assert_eq!(output.mmr_guest.size, 3); + assert!(output.unchained_batch_proofs_info.is_empty()); +} + +#[test] +#[should_panic = "Failed to verify MMR proof for hint"] +fn test_malformed_mmr_inclusion_proof() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + + let state_diff = create_random_state_diff(1); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + ); + + let chunk1 = serialized_mock_proof[0..397].to_vec(); + + let chunk2 = serialized_mock_proof[397..397 * 2].to_vec(); + + let chunk3 = serialized_mock_proof[397 * 2..].to_vec(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![[1; 32], [2; 32], [3; 32]], + vec![[1; 32], [2; 32], [3; 32]], + ); + + let aggregate_serialized = borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_1 = MockBlockHeader::from_height(1); + + let mut mmr_guest = MMRGuest::new(); + let chunks = vec![ + ([1; 32], chunk1.clone()), + ([2; 32], chunk2.clone()), + ([3; 32], chunk3.clone()), + ]; + + let mut mmr_hints = create_mmr_hints(&mut mmr_guest, chunks); + + // Malform the inclusion proof + 
mmr_hints[0].1.inclusion_proof.push(MMRNodeHash::default()); + + let lcp_out = LightClientCircuitOutput { + state_root: l2_genesis_state_root, + light_client_proof_method_id, + latest_da_state: LatestDaState { + block_hash: block_header_1.prev_hash.0, + ..Default::default() + }, + unchained_batch_proofs_info: vec![], + last_l2_height: 0, + batch_proof_method_ids: vec![(0, [0, 0, 0, 0, 0, 0, 0, 0])], + mmr_guest, + }; + + let prev_lcp_out = create_prev_lcp_serialized(lcp_out, true); + + let input = LightClientCircuitInput { + previous_light_client_proof_journal: Some(prev_lcp_out), + light_client_proof_method_id, + da_block_header: block_header_1, + // Only aggregate is present others are in mmr hints + da_data: vec![blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints, + expected_to_fail_hint: vec![], + }; + + run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); +} + +#[test] +fn test_malicious_aggregate_should_not_work() { + let light_client_proof_method_id = [1u32; 8]; + let da_verifier = MockDaVerifier {}; + + let l2_genesis_state_root = [1u8; 32]; + let batch_prover_da_pub_key = [9; 32]; + let method_id_upgrade_authority = [11u8; 32]; + let block_header_1 = MockBlockHeader::from_height(1); + + let state_diff = create_random_state_diff(100); + + let serialized_mock_proof = create_serialized_mock_proof( + l2_genesis_state_root, + [2u8; 32], + 101, + true, + Some(state_diff), + ); + + let chunk1 = serialized_mock_proof[0..39700].to_vec(); + let chunk1_da_data = DaDataLightClient::Chunk(chunk1.clone()); + let chunk1_serialized = borsh::to_vec(&chunk1_da_data).expect("should serialize"); + + let mut blob1 = MockBlob::new( + chunk1_serialized.clone(), + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([1; 32]), + ); + blob1.full_data(); + + let chunk2 = serialized_mock_proof[39700..39700 * 2].to_vec(); + let chunk2_da_data = DaDataLightClient::Chunk(chunk2.clone()); + let chunk2_serialized = borsh::to_vec(&chunk2_da_data).expect("should serialize"); + + let mut blob2 = MockBlob::new( + chunk2_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([2; 32]), + ); + + blob2.full_data(); + + let mut mmr = MMRNative::new(InMemoryStore::default()); + mmr.append(MMRChunk::new([1; 32], chunk1.clone())).unwrap(); + mmr.append(MMRChunk::new([2; 32], chunk2)).unwrap(); + + let (mmr_chunk1, mmr_proof1) = mmr.generate_proof([1; 32]).unwrap().unwrap(); + let (mmr_chunk2, mmr_proof2) = mmr.generate_proof([2; 32]).unwrap().unwrap(); + let mmr_hints = vec![(mmr_chunk1, mmr_proof1), (mmr_chunk2, mmr_proof2)]; + + // First block has the two chunks + let input = LightClientCircuitInput { + previous_light_client_proof_journal: None, + light_client_proof_method_id, + da_block_header: block_header_1.clone(), + da_data: vec![blob1.clone(), blob2.clone()], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints: Default::default(), + expected_to_fail_hint: vec![], + }; + + let output = run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); + + assert_eq!(output.state_root, l2_genesis_state_root); + assert_eq!(output.last_l2_height, 0); + assert!(output.unchained_batch_proofs_info.is_empty()); + 
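// Both chunks from block 1 were left unreferenced by any aggregate, so the circuit appended them to the guest MMR: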
assert_eq!(output.mmr_guest.size, 2); + + let malicious_aggregate_da_data = DaDataLightClient::Aggregate( + vec![blob1.wtxid().unwrap(), blob2.wtxid().unwrap()], + vec![blob1.wtxid().unwrap(), blob2.wtxid().unwrap()], + ); + let malicious_aggregate_serialized = + borsh::to_vec(&malicious_aggregate_da_data).expect("should serialize"); + + // Malicious blob sent, takes 2/3 of the chunks and tries to break the circuit + let mut malicious_blob = MockBlob::new( + malicious_aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([99; 32]), + ); + malicious_blob.full_data(); + + let block_header_2 = MockBlockHeader::from_height(2); + + let input = LightClientCircuitInput { + previous_light_client_proof_journal: Some(create_prev_lcp_serialized(output, true)), + light_client_proof_method_id, + da_block_header: block_header_2, + da_data: vec![malicious_blob], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints: mmr_hints.clone().into(), + expected_to_fail_hint: vec![], + }; + + let output = run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); + + // The malicious did not work no state updates or panics + assert_eq!(output.state_root, l2_genesis_state_root); + assert_eq!(output.last_l2_height, 0); + assert!(output.unchained_batch_proofs_info.is_empty()); + assert_eq!(output.mmr_guest.size, 2); + + let chunk3 = serialized_mock_proof[39700 * 2..].to_vec(); + let chunk3_da_data = DaDataLightClient::Chunk(chunk3.clone()); + let chunk3_serialized = borsh::to_vec(&chunk3_da_data).expect("should serialize"); + + // Last chhunk + let mut blob3 = MockBlob::new( + chunk3_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([3; 32]), + ); + blob3.full_data(); + + let aggregate_da_data = DaDataLightClient::Aggregate( + vec![ + blob1.wtxid().unwrap(), + blob2.wtxid().unwrap(), + blob3.wtxid().unwrap(), + ], + vec![ + blob1.wtxid().unwrap(), + blob2.wtxid().unwrap(), + blob3.wtxid().unwrap(), + ], + ); + + let aggregate_serialized = borsh::to_vec(&aggregate_da_data).expect("should serialize"); + + let mut blob4 = MockBlob::new( + aggregate_serialized, + MockAddress::new([9u8; 32]), + [0u8; 32], + Some([4; 32]), + ); + blob4.full_data(); + + let block_header_3 = MockBlockHeader::from_height(3); + let input = LightClientCircuitInput { + previous_light_client_proof_journal: Some(create_prev_lcp_serialized(output, true)), + light_client_proof_method_id, + da_block_header: block_header_3, + da_data: vec![blob3, blob4], + inclusion_proof: [1u8; 32], + completeness_proof: (), + mmr_hints: mmr_hints.into(), + expected_to_fail_hint: vec![], + }; + + let output = run_circuit::<_, MockZkGuest>( + da_verifier.clone(), + input, + l2_genesis_state_root, + INITIAL_BATCH_PROOF_METHOD_IDS.to_vec(), + &batch_prover_da_pub_key.clone(), + &method_id_upgrade_authority, + Network::Nightly, + ) + .unwrap(); + + // When last chunk is sent with the correct aggregate we can see the state update + assert_eq!(output.state_root, [2; 32]); + assert_eq!(output.last_l2_height, 101); + assert!(output.unchained_batch_proofs_info.is_empty()); + assert_eq!(output.mmr_guest.size, 2); +} diff --git a/crates/light-client-prover/src/tests/test_utils.rs b/crates/light-client-prover/src/tests/test_utils.rs index 688b67738..f3c3f39b3 100644 --- a/crates/light-client-prover/src/tests/test_utils.rs +++ 
b/crates/light-client-prover/src/tests/test_utils.rs @@ -1,8 +1,10 @@ -use std::collections::BTreeMap; +use std::collections::{BTreeMap, VecDeque}; +use rand::{thread_rng, Rng}; use sov_mock_da::{MockAddress, MockBlob, MockDaSpec, MockHash}; use sov_mock_zkvm::{MockCodeCommitment, MockJournal, MockProof}; use sov_rollup_interface::da::{BatchProofMethodId, BlobReaderTrait, DaDataLightClient}; +use sov_rollup_interface::mmr::{InMemoryStore, MMRChunk, MMRGuest, MMRInclusionProof, MMRNative}; use sov_rollup_interface::zk::{BatchProofCircuitOutput, LightClientCircuitOutput}; pub(crate) fn create_mock_batch_proof( @@ -45,12 +47,51 @@ pub(crate) fn create_mock_batch_proof( let da_data = DaDataLightClient::Complete(mock_serialized); let da_data_ser = borsh::to_vec(&da_data).expect("should serialize"); - let mut blob = MockBlob::new(da_data_ser, MockAddress::new([9u8; 32]), [0u8; 32]); + let mut blob = MockBlob::new(da_data_ser, MockAddress::new([9u8; 32]), [0u8; 32], None); blob.full_data(); blob } +pub(crate) fn create_serialized_mock_proof( + initial_state_root: [u8; 32], + final_state_root: [u8; 32], + last_l2_height: u64, + is_valid: bool, + state_diff: Option, Option>>>, +) -> Vec { + let batch_proof_method_id = MockCodeCommitment([2u8; 32]); + + let bp = BatchProofCircuitOutput:: { + initial_state_root, + final_state_root, + prev_soft_confirmation_hash: [3; 32], + final_soft_confirmation_hash: [4; 32], + state_diff: state_diff.unwrap_or_default(), + da_slot_hash: MockHash([5; 32]), + sequencer_commitments_range: (0, 0), + sequencer_public_key: [9; 32].to_vec(), + sequencer_da_public_key: [9; 32].to_vec(), + last_l2_height, + preproven_commitments: vec![], + }; + + let bp_serialized = borsh::to_vec(&bp).expect("should serialize"); + + let serialized_journal = match is_valid { + true => borsh::to_vec(&MockJournal::Verifiable(bp_serialized.clone())).unwrap(), + false => borsh::to_vec(&MockJournal::Unverifiable(bp_serialized.clone())).unwrap(), + }; + + let mock_proof = MockProof { + program_id: batch_proof_method_id.clone(), + is_valid: true, + log: serialized_journal.clone(), + }; + + mock_proof.encode_to_vec() +} + pub(crate) fn create_prev_lcp_serialized( output: LightClientCircuitOutput, is_valid: bool, @@ -74,8 +115,64 @@ pub(crate) fn create_new_method_id_tx( let da_data_ser = borsh::to_vec(&da_data).expect("should serialize"); - let mut blob = MockBlob::new(da_data_ser, MockAddress::new(pub_key), [0u8; 32]); + let mut blob = MockBlob::new(da_data_ser, MockAddress::new(pub_key), [0u8; 32], None); blob.full_data(); blob } + +pub(crate) fn create_random_state_diff(size_in_kb: u64) -> BTreeMap, Option>> { + let mut rng = thread_rng(); + let mut map = BTreeMap::new(); + let mut total_size: u64 = 0; + + // Convert size to bytes + let size_in_bytes = size_in_kb * 1024; + + while total_size < size_in_bytes { + // Generate a random 32-byte key + let key: Vec = (0..32).map(|_| rng.gen::()).collect(); + + // Randomly decide if the value is `None` or a `Vec` of random length + let value: Option> = if rng.gen_bool(0.1) { + None + } else { + let value_size: usize = rng.gen_range(1..=2048); + Some((0..value_size).map(|_| rng.gen::()).collect()) + }; + + // Calculate the size of the key and value + let key_size = key.len() as u64; + let value_size = match &value { + Some(v) => v.len() as u64 + 1, + None => 1, + }; + + // Add to the map + map.insert(key, value); + + // Update the total size + total_size += key_size + value_size; + } + + map +} + +pub(crate) fn create_mmr_hints( + mmr_guest: &mut 
MMRGuest, + chunks: Vec<([u8; 32], Vec)>, +) -> VecDeque<(MMRChunk, MMRInclusionProof)> { + let mut mmr = MMRNative::new(InMemoryStore::default()); + for chunk in chunks.iter() { + mmr.append(MMRChunk::new(chunk.0, chunk.1.clone())).unwrap(); + } + + let mut mmr_hints = VecDeque::new(); + for chunk in chunks.iter() { + let (chunk, proof) = mmr.generate_proof(chunk.0).unwrap().unwrap(); + mmr_guest.append(chunk.clone()); + mmr_hints.push_back((chunk, proof)); + } + + mmr_hints +} diff --git a/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs b/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs index c6703d4ce..4df38144f 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs @@ -150,8 +150,8 @@ mod tests { header: MockBlockHeader::from_height(at_height), is_valid: true, blobs: vec![ - MockBlob::new(vec![2; 44], MockAddress::new([1; 32]), [2; 32]), - MockBlob::new(vec![3; 12], MockAddress::new([2; 32]), [5; 32]), + MockBlob::new(vec![2; 44], MockAddress::new([1; 32]), [2; 32], None), + MockBlob::new(vec![3; 12], MockAddress::new([2; 32]), [5; 32], None), ], } } diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index a96a4035e..3e8203611 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -223,6 +223,7 @@ impl MockDaService { zkp_proof, self.sequencer_da_address.clone(), data_hash, + None, ); let header = MockBlockHeader { prev_hash: previous_block_hash, @@ -349,6 +350,11 @@ impl DaService for MockDaService { type Error = anyhow::Error; type BlockHash = [u8; 32]; + /// Decompress and deserialize chunks + fn decompress_chunks(&self, complete_chunks: &[u8]) -> Result, Self::Error> { + Ok(complete_chunks.to_vec()) + } + /// Gets block at given height /// If block is not available, waits until it is /// It is possible to read non-finalized and last finalized blocks multiple times diff --git a/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs b/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs index 9d981dd1c..0f5eb1260 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs @@ -177,6 +177,7 @@ pub struct MockDaVerifier {} pub struct MockBlob { pub(crate) address: MockAddress, pub(crate) hash: [u8; 32], + pub(crate) wtxid: Option<[u8; 32]>, /// Actual data from the blob. Public for testing purposes. pub data: CountedBufReader, // Data for the aggregated ZK proof. @@ -185,12 +186,18 @@ pub struct MockBlob { impl MockBlob { /// Creates a new mock blob with the given data, claiming to have been published by the provided address. 
- pub fn new(data: Vec, address: MockAddress, hash: [u8; 32]) -> Self { + pub fn new( + data: Vec, + address: MockAddress, + hash: [u8; 32], + wtxid: Option<[u8; 32]>, + ) -> Self { Self { address, data: CountedBufReader::new(Bytes::from(data)), zk_proofs_data: Default::default(), hash, + wtxid, } } @@ -200,12 +207,14 @@ impl MockBlob { zk_proofs_data: Vec, address: MockAddress, hash: [u8; 32], + wtxid: Option<[u8; 32]>, ) -> Self { Self { address, hash, data: CountedBufReader::new(Bytes::from(data)), zk_proofs_data, + wtxid, } } } diff --git a/crates/sovereign-sdk/adapters/mock-da/src/verifier.rs b/crates/sovereign-sdk/adapters/mock-da/src/verifier.rs index 74416cedd..3af6101d6 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/verifier.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/verifier.rs @@ -17,6 +17,10 @@ impl BlobReaderTrait for MockBlob { self.hash } + fn wtxid(&self) -> Option<[u8; 32]> { + self.wtxid + } + fn verified_data(&self) -> &[u8] { self.data.accumulator() } @@ -30,6 +34,10 @@ impl BlobReaderTrait for MockBlob { self.data.advance(num_bytes); self.verified_data() } + + fn serialize_v1(&self) -> borsh::io::Result> { + borsh::to_vec(self) + } } /// A [`sov_rollup_interface::da::DaSpec`] suitable for testing. @@ -51,6 +59,10 @@ impl DaVerifier for MockDaVerifier { type Error = anyhow::Error; + fn decompress_chunks(&self, complete_chunks: &[u8]) -> Result, Self::Error> { + Ok(complete_chunks.to_vec()) + } + fn new(_params: ::ChainParams) -> Self { Self {} } diff --git a/crates/sovereign-sdk/adapters/mock-zkvm/src/lib.rs b/crates/sovereign-sdk/adapters/mock-zkvm/src/lib.rs index f0d88c094..a2f782abf 100644 --- a/crates/sovereign-sdk/adapters/mock-zkvm/src/lib.rs +++ b/crates/sovereign-sdk/adapters/mock-zkvm/src/lib.rs @@ -75,7 +75,7 @@ impl MockProof { /// Tries to deserialize a proof from a byte slice. pub fn decode(input: &[u8]) -> Result { - Ok(Self::try_from_slice(input).unwrap()) + Self::try_from_slice(input).map_err(|e| anyhow::anyhow!(e)) } } @@ -252,7 +252,7 @@ impl sov_rollup_interface::zk::Zkvm for MockZkGuest { } fn extract_raw_output(serialized_proof: &[u8]) -> Result, Self::Error> { - let mock_proof = MockProof::decode(serialized_proof).unwrap(); + let mock_proof = MockProof::decode(serialized_proof)?; Ok(mock_proof.log) } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/lib.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/lib.rs index b1e99aec4..00ad4ff27 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/lib.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/lib.rs @@ -19,6 +19,8 @@ pub mod schema; /// This is primarily used as the backing store for the [JMT(JellyfishMerkleTree)](https://docs.rs/jmt/latest/jmt/). pub mod state_db; +pub mod mmr_db; + /// Implements a wrapper around RocksDB meant for storing state only accessible /// outside of the zkVM execution environment, as this data is not included in /// the JMT and does not contribute to proofs of execution. 
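The `pub mod mmr_db;` declaration above is backed by the `MmrDB` store introduced in the next file. As a rough sketch of the intended wiring (the helper `track_chunk` below is hypothetical and illustrative only; the real flow lives in the light client prover's L1 block handler), `MmrDB` implements `NodeStore` and can therefore back `MMRNative` in the same way the `InMemoryStore` does in the tests:

```
use sov_db::mmr_db::MmrDB;
use sov_db::rocks_db_config::RocksdbConfig;
use sov_rollup_interface::mmr::{MMRChunk, MMRNative, Wtxid};

// Hypothetical helper: persist a DA chunk and produce the (chunk, proof) pair
// that would later be passed to the guest as an mmr_hints entry.
fn track_chunk(cfg: &RocksdbConfig, wtxid: Wtxid, body: Vec<u8>) -> anyhow::Result<()> {
    let mmr_db = MmrDB::new(cfg)?; // RocksDB-backed NodeStore
    let mut mmr = MMRNative::new(mmr_db);
    mmr.append(MMRChunk::new(wtxid, body))?;
    if let Some((chunk, proof)) = mmr.generate_proof(wtxid)? {
        debug_assert_eq!(chunk.wtxid, wtxid);
        let _ = proof; // (chunk, proof) would go into LightClientCircuitInput::mmr_hints
    }
    Ok(())
}
```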
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/mmr_db.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/mmr_db.rs new file mode 100644 index 000000000..db60a897a --- /dev/null +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/mmr_db.rs @@ -0,0 +1,90 @@ +#![allow(missing_docs)] +use std::sync::Arc; + +use sov_rollup_interface::mmr::NodeStore; +use sov_schema_db::DB; +use tracing::instrument; + +use crate::rocks_db_config::RocksdbConfig; +use crate::schema::tables::{MMRChunks, MMRNodes, MMRTreeSize, MMR_TABLES}; + +#[derive(Clone, Debug)] +pub struct MmrDB { + db: Arc, +} + +impl MmrDB { + const DB_PATH_SUFFIX: &'static str = "mmr"; + const DB_NAME: &'static str = "mmr-db"; + + /// Initialize [`sov_schema_db::DB`] that should be used by snapshots. + pub fn setup_schema_db(cfg: &RocksdbConfig) -> anyhow::Result { + let raw_options = cfg.as_raw_options(false); + let mmr_db_path = cfg.path.join(Self::DB_PATH_SUFFIX); + sov_schema_db::DB::open( + mmr_db_path, + Self::DB_NAME, + MMR_TABLES.iter().copied(), + &raw_options, + ) + } + + /// Open a [`MMRDB`] (backed by RocksDB) at the specified path. + #[instrument(level = "trace", skip_all, err)] + pub fn new(cfg: &RocksdbConfig) -> Result { + let path = cfg.path.join(Self::DB_PATH_SUFFIX); + let raw_options = cfg.as_raw_options(false); + let tables: Vec<_> = MMR_TABLES.iter().map(|e| e.to_string()).collect(); + let inner = DB::open(path, Self::DB_NAME, tables, &raw_options)?; + + Ok(Self { + db: Arc::new(inner), + }) + } +} + +impl NodeStore for MmrDB { + fn save_node( + &mut self, + level: u32, + index: u32, + node_hash: sov_rollup_interface::mmr::MMRNodeHash, + ) -> anyhow::Result<()> { + self.db.put::(&(level, index), &node_hash) + } + + fn load_node( + &self, + level: u32, + index: u32, + ) -> anyhow::Result> { + self.db.get::(&(level, index)) + } + + fn get_tree_size(&self) -> u32 { + self.db + .get::(&()) + .ok() + .flatten() + .unwrap_or_default() + } + + fn set_tree_size(&mut self, size: u32) -> anyhow::Result<()> { + self.db.put::(&(), &size) + } + + fn save_chunk( + &mut self, + hash: sov_rollup_interface::mmr::MMRNodeHash, + chunk: sov_rollup_interface::mmr::MMRChunk, + ) -> anyhow::Result<()> { + self.db.put::(&hash, &chunk) + } + + fn load_chunk( + &self, + hash: sov_rollup_interface::mmr::MMRNodeHash, + ) -> anyhow::Result> { + self.db.get::(&hash) + } +} diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index f8d1554c0..244ee4c37 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -13,6 +13,7 @@ use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use jmt::storage::{NibblePath, Node, NodeKey}; use jmt::Version; use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::mmr::{MMRChunk, MMRNodeHash, Wtxid}; use sov_rollup_interface::stf::StateDiff; use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec}; use sov_schema_db::{CodecError, SeekKeyEncoder}; @@ -23,6 +24,14 @@ use super::types::{ StoredSoftConfirmation, StoredVerifiedProof, }; +/// A list of all tables used by the StateDB. These tables store rollup state - meaning +/// account balances, nonces, etc. +pub const MMR_TABLES: &[&str] = &[ + MMRNodes::table_name(), + MMRTreeSize::table_name(), + MMRChunks::table_name(), +]; + /// A list of all tables used by the StateDB. These tables store rollup state - meaning /// account balances, nonces, etc. 
pub const STATE_TABLES: &[&str] = &[ @@ -321,6 +330,21 @@ define_table_with_seek_key_codec!( (LastPrunedBlock) () => u64 ); +define_table_with_seek_key_codec!( + /// Stores the chunk's hash of an MMR + (MMRNodes) (u32, u32) => MMRNodeHash +); + +define_table_with_seek_key_codec!( + /// Stores the chunk's content by hash + (MMRChunks) Wtxid => MMRChunk +); + +define_table_with_seek_key_codec!( + /// Stores the MMR tree size + (MMRTreeSize) () => u32 +); + #[cfg(test)] define_table_with_seek_key_codec!( /// Test table old diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types.rs index 2a40ae9c5..e92acb733 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use borsh::{BorshDeserialize, BorshSerialize}; use sov_rollup_interface::da::LatestDaState; +use sov_rollup_interface::mmr::MMRGuest; use sov_rollup_interface::rpc::{ BatchProofOutputRpcResponse, BatchProofResponse, HexTx, LatestDaStateRpcResponse, LightClientProofOutputRpcResponse, LightClientProofResponse, SoftConfirmationResponse, @@ -94,6 +95,8 @@ pub struct StoredLightClientProofOutput { pub last_l2_height: u64, /// L2 activation height of the fork and the Method ids of the batch proofs that were verified in the light client proof pub batch_proof_method_ids: Vec<(u64, [u32; 8])>, + /// A list of unprocessed chunks + pub mmr_guest: MMRGuest, } impl From for LightClientProofOutputRpcResponse { @@ -112,6 +115,7 @@ impl From for LightClientProofOutputRpcResponse { unchained_batch_proofs_info: value.unchained_batch_proofs_info, last_l2_height: value.last_l2_height, batch_proof_method_ids: value.batch_proof_method_ids, + mmr_guest: value.mmr_guest, } } } @@ -133,6 +137,7 @@ impl From for StoredLightClientProofOutput { unchained_batch_proofs_info: circuit_output.unchained_batch_proofs_info, last_l2_height: circuit_output.last_l2_height, batch_proof_method_ids: circuit_output.batch_proof_method_ids, + mmr_guest: circuit_output.mmr_guest, } } } @@ -154,6 +159,7 @@ impl From for LightClientCircuitOutput { unchained_batch_proofs_info: db_output.unchained_batch_proofs_info, last_l2_height: db_output.last_l2_height, batch_proof_method_ids: db_output.batch_proof_method_ids, + mmr_guest: db_output.mmr_guest, } } } diff --git a/crates/sovereign-sdk/module-system/sov-modules-api/src/reexport_macros.rs b/crates/sovereign-sdk/module-system/sov-modules-api/src/reexport_macros.rs index a72a43077..af1eed527 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-api/src/reexport_macros.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-api/src/reexport_macros.rs @@ -2,6 +2,9 @@ /// type. #[cfg(feature = "macros")] pub use sov_modules_macros::DispatchCall; +/// Implements ForkCodec trait. Requires the type to be enum. +#[cfg(feature = "macros")] +pub use sov_modules_macros::ForkCodec; /// Derives the [`Genesis`](trait.Genesis.html) trait for the underlying runtime /// `struct`. 
#[cfg(feature = "macros")] diff --git a/crates/sovereign-sdk/module-system/sov-modules-macros/src/fork_codec.rs b/crates/sovereign-sdk/module-system/sov-modules-macros/src/fork_codec.rs new file mode 100644 index 000000000..6bd5f1b5e --- /dev/null +++ b/crates/sovereign-sdk/module-system/sov-modules-macros/src/fork_codec.rs @@ -0,0 +1,67 @@ +use proc_macro2::Span; +use quote::quote; +use syn::{Data, DeriveInput, Error}; + +pub fn derive_fork_codec(input: DeriveInput) -> Result { + // Extract the name of the enum + let name = &input.ident; + + // Ensure it's an enum + let Data::Enum(data_enum) = &input.data else { + return Err(Error::new( + Span::call_site(), + "ForkCodec can only be derived for enums. Use borsh derive directly instead.", + )); + }; + + // Extract variants + let variants = &data_enum.variants; + + // Generate match arms for encode and decode + let encode_arms = variants.iter().map(|variant| { + let variant_name = &variant.ident; + quote! { + Self::#variant_name(inner) => borsh::to_vec(inner).map_err(|e| e.into()), + } + }); + + let decode_arms = variants.iter().enumerate().map(|(index, variant)| { + let variant_name = &variant.ident; + quote! { + #index => { + let inner = borsh::from_slice(slice)?; + Ok(Self::#variant_name(inner)) + } + } + }); + + // Fallback for remaining SpecId variants + let fallback_variant = &variants.last().unwrap().ident; + let fallback_arm = quote! { + _ => { + let inner = borsh::from_slice(slice)?; + Ok(Self::#fallback_variant(inner)) + } + }; + + // Generate the implementation + let expanded = quote! { + impl sov_rollup_interface::fork::ForkCodec for #name { + fn encode(&self) -> anyhow::Result> { + match self { + #(#encode_arms)* + } + } + + fn decode(bytes: impl AsRef<[u8]>, spec: sov_rollup_interface::spec::SpecId) -> anyhow::Result { + let slice = bytes.as_ref(); + match spec as u8 as usize { + #(#decode_arms)* + #fallback_arm + } + } + } + }; + + Ok(proc_macro::TokenStream::from(expanded)) +} diff --git a/crates/sovereign-sdk/module-system/sov-modules-macros/src/lib.rs b/crates/sovereign-sdk/module-system/sov-modules-macros/src/lib.rs index e7dafae87..c94e8dca5 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-macros/src/lib.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-macros/src/lib.rs @@ -14,6 +14,7 @@ mod cli_parser; mod common; mod default_runtime; mod dispatch; +mod fork_codec; mod make_constants; mod manifest; mod module_call_json_schema; @@ -27,6 +28,7 @@ use default_runtime::DefaultRuntimeMacro; use dispatch::dispatch_call::DispatchCallMacro; use dispatch::genesis::GenesisMacro; use dispatch::message_codec::MessageCodec; +use fork_codec::derive_fork_codec; use make_constants::{make_const, PartialItemConst}; use module_call_json_schema::derive_module_call_json_schema; use module_info::ModuleType; @@ -212,9 +214,16 @@ pub fn cli_parser(input: TokenStream) -> TokenStream { let cli_parser = CliParserMacro::new("Cmd"); handle_macro_error(cli_parser.cli_macro(input)) } + #[cfg(feature = "native")] #[proc_macro_derive(CliWalletArg)] pub fn custom_enum_clap(input: TokenStream) -> TokenStream { let input: syn::DeriveInput = parse_macro_input!(input); handle_macro_error(derive_cli_wallet_arg(input)) } + +#[proc_macro_derive(ForkCodec)] +pub fn fork_codec_derive(input: TokenStream) -> TokenStream { + let input: syn::DeriveInput = parse_macro_input!(input); + handle_macro_error(derive_fork_codec(input)) +} diff --git a/crates/sovereign-sdk/rollup-interface/Cargo.toml b/crates/sovereign-sdk/rollup-interface/Cargo.toml index 
5342b08c9..92dffae8d 100644 --- a/crates/sovereign-sdk/rollup-interface/Cargo.toml +++ b/crates/sovereign-sdk/rollup-interface/Cargo.toml @@ -21,10 +21,10 @@ borsh = { workspace = true } bytes = { workspace = true, optional = true, default-features = true } digest = { workspace = true } futures = { workspace = true, optional = true } -jmt = { workspace = true, optional = true } hex = { workspace = true } +jmt = { workspace = true, optional = true } serde = { workspace = true } -sha2 = { workspace = true, optional = true } +sha2 = { workspace = true } thiserror = { workspace = true, optional = true } # TODO: Remove tokio when https://github.com/Sovereign-Labs/sovereign-sdk/issues/1161 is resolved tokio = { workspace = true, optional = true } @@ -46,6 +46,5 @@ std = [ "hex/default", "jmt", "serde/default", - "sha2", "thiserror", ] diff --git a/crates/sovereign-sdk/rollup-interface/src/fork/mod.rs b/crates/sovereign-sdk/rollup-interface/src/fork/mod.rs index c7e03fb9b..b4f536593 100644 --- a/crates/sovereign-sdk/rollup-interface/src/fork/mod.rs +++ b/crates/sovereign-sdk/rollup-interface/src/fork/mod.rs @@ -5,6 +5,8 @@ mod migration; #[cfg(test)] mod tests; +use alloc::vec::Vec; + pub use manager::*; pub use migration::*; @@ -79,3 +81,55 @@ pub fn fork_pos_from_block_number(forks: &[Fork], block_number: u64) -> usize { Err(idx) => idx.saturating_sub(1), } } + +/// ForkCodec is the serialization trait for types that require forking when changed. +/// Optimal usecase would be the type to be versioned enum, and do untagged enum ser/de. +/// +/// Example: +/// +/// ``` +/// use sov_rollup_interface::fork::ForkCodec; +/// use sov_rollup_interface::spec::SpecId; +/// +/// #[derive(borsh::BorshSerialize, borsh::BorshDeserialize)] +/// struct InputV1 {} +/// +/// #[derive(borsh::BorshSerialize, borsh::BorshDeserialize)] +/// struct InputV2 {} +/// +/// enum Input { +/// V1(InputV1), +/// V2(InputV2), +/// } +/// +/// impl Input { +/// pub fn new_v1(v1: InputV1) -> Self { +/// Self::V1(v1) +/// } +/// +/// pub fn new_v2(v2: InputV2) -> Self { +/// Self::V2(v2) +/// } +/// } +/// +/// impl ForkCodec for Input { +/// fn encode(&self) -> anyhow::Result> { +/// match self { +/// Self::V1(v1) => Ok(borsh::to_vec(v1)?), +/// Self::V2(v2) => Ok(borsh::to_vec(v2)?), +/// } +/// } +/// +/// fn decode(bytes: impl AsRef<[u8]>, spec: SpecId) -> anyhow::Result { +/// let slice = bytes.as_ref(); +/// match spec { +/// SpecId::Genesis => Ok(Self::new_v1(borsh::from_slice(slice)?)), +/// SpecId::Fork1 => Ok(Self::new_v2(borsh::from_slice(slice)?)), +/// } +/// } +/// } +/// ``` +pub trait ForkCodec: Sized { + fn encode(&self) -> anyhow::Result>; + fn decode(bytes: impl AsRef<[u8]>, spec: SpecId) -> anyhow::Result; +} diff --git a/crates/sovereign-sdk/rollup-interface/src/lib.rs b/crates/sovereign-sdk/rollup-interface/src/lib.rs index 2d5e35369..605bdb3b8 100644 --- a/crates/sovereign-sdk/rollup-interface/src/lib.rs +++ b/crates/sovereign-sdk/rollup-interface/src/lib.rs @@ -14,24 +14,21 @@ extern crate alloc; #[cfg(feature = "native")] pub const CITREA_VERSION: &str = "v0.5.5"; -mod state_machine; -pub use state_machine::*; - +/// Fork module +pub mod fork; +pub mod mmr; mod network; -pub use network::*; - mod node; +/// Specs module +pub mod spec; +mod state_machine; #[cfg(not(target_has_atomic = "ptr"))] pub use alloc::rc::Rc as RefCount; #[cfg(target_has_atomic = "ptr")] pub use alloc::sync::Arc as RefCount; +pub use network::*; pub use node::*; +pub use state_machine::*; pub use {anyhow, digest}; - -/// Fork 
module -pub mod fork; - -/// Specs module -pub mod spec; diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/guest.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/guest.rs new file mode 100644 index 000000000..de47e0696 --- /dev/null +++ b/crates/sovereign-sdk/rollup-interface/src/mmr/guest.rs @@ -0,0 +1,55 @@ +use alloc::vec::Vec; + +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; + +use super::{hash_pair, MMRChunk, MMRInclusionProof}; + +#[derive( + Default, Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize, +)] +pub struct MMRGuest { + pub subroots: Vec<[u8; 32]>, + pub size: u32, +} + +impl MMRGuest { + pub fn new() -> Self { + MMRGuest { + subroots: Vec::new(), + size: 0, + } + } + + pub fn append(&mut self, chunk: MMRChunk) { + let mut current = chunk.hash(); + let mut size = self.size; + + while size % 2 == 1 { + let sibling = self.subroots.pop().unwrap(); + current = hash_pair(sibling, current); + size /= 2; + } + + self.subroots.push(current); + self.size += 1; + } + + pub fn verify_proof(&self, chunk: &MMRChunk, mmr_proof: &MMRInclusionProof) -> bool { + let mut current_hash = chunk.hash(); + + for (i, sibling) in mmr_proof.inclusion_proof.iter().enumerate() { + if mmr_proof.internal_idx & (1 << i) == 0 { + current_hash = hash_pair(current_hash, *sibling); + } else { + current_hash = hash_pair(*sibling, current_hash); + } + } + + if mmr_proof.subroot_idx >= self.subroots.len() as u32 { + return false; // Subroot index is out of bounds + } + + self.subroots[mmr_proof.subroot_idx as usize] == current_hash + } +} diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/mod.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/mod.rs new file mode 100644 index 000000000..b7ba9c28e --- /dev/null +++ b/crates/sovereign-sdk/rollup-interface/src/mmr/mod.rs @@ -0,0 +1,88 @@ +#![allow(missing_docs)] +use alloc::vec::Vec; + +use anyhow::Result; +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; + +mod guest; +#[cfg(any(feature = "native", feature = "testing"))] +mod native; +#[cfg(any(feature = "native", feature = "testing"))] +mod test_utils; +#[cfg(test)] +mod tests; + +pub use guest::*; +#[cfg(any(feature = "native", feature = "testing"))] +pub use native::*; +#[cfg(any(feature = "native", feature = "testing"))] +pub use test_utils::*; + +pub type MMRNodeHash = [u8; 32]; +pub type Wtxid = [u8; 32]; + +pub trait NodeStore { + fn save_node(&mut self, level: u32, index: u32, hash: MMRNodeHash) -> Result<()>; + fn load_node(&self, level: u32, index: u32) -> Result>; + fn save_chunk(&mut self, hash: MMRNodeHash, chunk: MMRChunk) -> Result<()>; + fn load_chunk(&self, hash: MMRNodeHash) -> Result>; + fn get_tree_size(&self) -> u32; + fn set_tree_size(&mut self, size: u32) -> Result<()>; +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct MMRInclusionProof { + pub subroot_idx: u32, + pub internal_idx: u32, + pub inclusion_proof: Vec, +} + +impl MMRInclusionProof { + pub fn new(subroot_idx: u32, internal_idx: u32, inclusion_proof: Vec) -> Self { + MMRInclusionProof { + subroot_idx, + internal_idx, + inclusion_proof, + } + } + + pub fn get_subroot(&self, leaf: MMRNodeHash) -> MMRNodeHash { + let mut current_hash = leaf; + for (i, sibling) in self.inclusion_proof.iter().enumerate() { + if self.internal_idx & (1 << i) == 0 { + current_hash = hash_pair(current_hash, *sibling); + } else { + 
current_hash = hash_pair(*sibling, current_hash);
+            }
+        }
+        current_hash
+    }
+}
+
+#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)]
+pub struct MMRChunk {
+    pub wtxid: Wtxid,
+    pub body: Vec<u8>,
+}
+
+impl MMRChunk {
+    pub fn new(wtxid: Wtxid, body: Vec<u8>) -> Self {
+        MMRChunk { wtxid, body }
+    }
+
+    pub fn hash(&self) -> MMRNodeHash {
+        let mut hasher = Sha256::default();
+        hasher.update(self.wtxid);
+        hasher.update(&self.body);
+        hasher.finalize().into()
+    }
+}
+
+pub fn hash_pair(left: MMRNodeHash, right: MMRNodeHash) -> MMRNodeHash {
+    let mut hasher = Sha256::default();
+    hasher.update(left);
+    hasher.update(right);
+    hasher.finalize().into()
+}
diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/native.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/native.rs
new file mode 100644
index 000000000..11c570f50
--- /dev/null
+++ b/crates/sovereign-sdk/rollup-interface/src/mmr/native.rs
@@ -0,0 +1,164 @@
+use alloc::vec;
+use alloc::vec::Vec;
+use std::collections::BTreeMap;
+
+use anyhow::Result;
+use borsh::{BorshDeserialize, BorshSerialize};
+use serde::{Deserialize, Serialize};
+
+use super::{hash_pair, MMRChunk, MMRInclusionProof, MMRNodeHash, NodeStore, Wtxid};
+
+#[derive(
+    Default, Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize,
+)]
+pub struct MMRNative<S: NodeStore> {
+    pub store: S,
+    pub cache: BTreeMap<(u32, u32), MMRNodeHash>,
+}
+
+impl<S: NodeStore> MMRNative<S> {
+    pub fn new(store: S) -> Self {
+        let mut mmr = MMRNative {
+            store,
+            cache: BTreeMap::new(),
+        };
+        mmr.recalculate_peaks().unwrap();
+        mmr
+    }
+
+    pub fn append(&mut self, chunk: MMRChunk) -> Result<()> {
+        let hash = chunk.hash();
+        self.store.save_chunk(hash, chunk)?;
+        let current_size = self.store.get_tree_size();
+        self.store.save_node(0, current_size, hash)?;
+        self.cache.insert((0, current_size), hash);
+        self.store.set_tree_size(current_size + 1)?;
+        self.recalculate_peaks()?;
+        Ok(())
+    }
+
+    pub fn contains(&mut self, wtxid: Wtxid) -> Result<bool> {
+        self.find_chunk_index_with_wtxid(wtxid)
+            .map(|idx| idx.is_some())
+    }
+
+    fn recalculate_peaks(&mut self) -> Result<()> {
+        let mut size = self.store.get_tree_size();
+        let mut level = 0;
+
+        while size > 1 {
+            if size % 2 == 0 {
+                let left = self.load_node(level, size - 2)?.unwrap();
+                let right = self.load_node(level, size - 1)?.unwrap();
+                let parent = hash_pair(left, right);
+
+                self.store.save_node(level + 1, size / 2 - 1, parent)?;
+                self.cache.insert((level + 1, size / 2 - 1), parent);
+            }
+            size /= 2;
+            level += 1;
+        }
+        Ok(())
+    }
+
+    pub fn generate_proof(
+        &mut self,
+        wtxid: Wtxid,
+    ) -> Result<Option<(MMRChunk, MMRInclusionProof)>> {
+        let Some(index) = self.find_chunk_index_with_wtxid(wtxid)? else {
+            return Ok(None);
+        };
+
+        let mut proof: Vec<MMRNodeHash> = vec![];
+        let mut current_index = index;
+        let mut current_level = 0;
+
+        while current_index % 2 == 1 || self.load_node(current_level, current_index + 1)?.is_some()
+        {
+            let sibling_index = if current_index % 2 == 0 {
+                current_index + 1
+            } else {
+                current_index - 1
+            };
+            proof.push(self.load_node(current_level, sibling_index)?.unwrap());
+            current_index /= 2;
+            current_level += 1;
+        }
+
+        let chunk = self
+            .store
+            .load_chunk(self.store.load_node(0, index)?.expect("Should be found"))?
+            .expect("Should be found");
+
+        let (subroot_idx, internal_idx) = self.get_helpers_from_index(index);
+        let mmr_proof = MMRInclusionProof::new(subroot_idx, internal_idx, proof);
+
+        Ok(Some((chunk, mmr_proof)))
+    }
+
+    fn load_node(&mut self, level: u32, index: u32) -> Result<Option<MMRNodeHash>> {
+        if let Some(&hash) = self.cache.get(&(level, index)) {
+            Ok(Some(hash))
+        } else {
+            let Some(node) = self.store.load_node(level, index)? else {
+                return Ok(None);
+            };
+
+            self.cache.insert((level, index), node);
+
+            Ok(Some(node))
+        }
+    }
+
+    // TODO: Could be implemented better
+    fn find_chunk_index_with_wtxid(&mut self, wtxid: Wtxid) -> Result<Option<u32>> {
+        let size = self.store.get_tree_size();
+        for i in 0..size {
+            if let Some(node_hash) = self.load_node(0, i)? {
+                if let Some(chunk) = self.store.load_chunk(node_hash)? {
+                    if chunk.wtxid == wtxid {
+                        return Ok(Some(i));
+                    }
+                }
+            }
+        }
+        Ok(None)
+    }
+
+    fn get_helpers_from_index(&self, index: u32) -> (u32, u32) {
+        let xor = self.store.get_tree_size() ^ index;
+        let xor_leading_digit = 31 - xor.leading_zeros();
+        let internal_idx = index & ((1 << xor_leading_digit) - 1);
+        let leading_zeros_size = 31 - self.store.get_tree_size().leading_zeros();
+        let mut subtree_idx = 0;
+        for i in xor_leading_digit + 1..=leading_zeros_size {
+            if self.store.get_tree_size() & (1 << i) != 0 {
+                subtree_idx += 1;
+            }
+        }
+        (subtree_idx, internal_idx)
+    }
+
+    pub fn verify_proof(&mut self, chunk: MMRChunk, mmr_proof: &MMRInclusionProof) -> bool {
+        let subroot = mmr_proof.get_subroot(chunk.hash());
+        let subroots = self.get_subroots();
+        subroots[mmr_proof.subroot_idx as usize] == subroot
+    }
+
+    pub(crate) fn get_subroots(&mut self) -> Vec<MMRNodeHash> {
+        let mut subroots: Vec<MMRNodeHash> = vec![];
+        let mut size = self.store.get_tree_size();
+        let mut level = 0;
+
+        while size > 0 {
+            if size % 2 == 1 {
+                let subroot = self.load_node(level, size - 1).ok().flatten().unwrap();
+                subroots.push(subroot);
+            }
+            size /= 2;
+            level += 1;
+        }
+        subroots.reverse();
+        subroots
+    }
+}
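The native and guest MMRs are intended to be used as a pair: a node appends chunks to `MMRNative` and produces inclusion proofs, while the circuit side checks them against the compact `MMRGuest` accumulator. A minimal sketch of that round trip, using the `InMemoryStore` test helper introduced in the diff just below (note that `InMemoryStore` and `MMRNative` are only exported under the `native`/`testing` features), could look like this:

```rust
use sov_rollup_interface::mmr::{InMemoryStore, MMRChunk, MMRGuest, MMRNative};

fn demo() -> anyhow::Result<()> {
    // Native side: full tree backed by a NodeStore (here the in-memory test store).
    let mut native = MMRNative::new(InMemoryStore::default());
    // Guest side: only the subroots (peaks) and the leaf count.
    let mut guest = MMRGuest::new();

    let chunk = MMRChunk::new([7u8; 32], vec![1, 2, 3]);
    native.append(chunk.clone())?;
    guest.append(chunk);

    // The native tree can produce an inclusion proof for a chunk by its wtxid...
    let (stored_chunk, proof) = native
        .generate_proof([7u8; 32])?
        .expect("chunk was appended above");

    // ...and the guest can verify that proof against its subroots alone.
    assert!(guest.verify_proof(&stored_chunk, &proof));
    Ok(())
}
```

This mirrors what the new tests in `tests.rs` below exercise at larger scale.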
diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/test_utils.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/test_utils.rs
new file mode 100644
index 000000000..7edcfdbdc
--- /dev/null
+++ b/crates/sovereign-sdk/rollup-interface/src/mmr/test_utils.rs
@@ -0,0 +1,39 @@
+use std::collections::BTreeMap;
+
+use super::*;
+
+#[derive(Clone, Default)]
+pub struct InMemoryStore {
+    storage: BTreeMap<(u32, u32), MMRNodeHash>,
+    chunks: BTreeMap<MMRNodeHash, MMRChunk>,
+    tree_size: u32,
+}
+
+impl NodeStore for InMemoryStore {
+    fn save_node(&mut self, level: u32, index: u32, hash: MMRNodeHash) -> Result<()> {
+        self.storage.insert((level, index), hash);
+        Ok(())
+    }
+
+    fn load_node(&self, level: u32, index: u32) -> Result<Option<MMRNodeHash>> {
+        Ok(self.storage.get(&(level, index)).cloned())
+    }
+
+    fn save_chunk(&mut self, hash: MMRNodeHash, chunk: MMRChunk) -> Result<()> {
+        self.chunks.insert(hash, chunk);
+        Ok(())
+    }
+
+    fn load_chunk(&self, hash: MMRNodeHash) -> Result<Option<MMRChunk>> {
+        Ok(self.chunks.get(&hash).cloned())
+    }
+
+    fn get_tree_size(&self) -> u32 {
+        self.tree_size
+    }
+
+    fn set_tree_size(&mut self, size: u32) -> Result<()> {
+        self.tree_size = size;
+        Ok(())
+    }
+}
diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/tests.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/tests.rs
new file mode 100644
index 000000000..69def1d2d
--- /dev/null
+++ b/crates/sovereign-sdk/rollup-interface/src/mmr/tests.rs
@@ -0,0 +1,175 @@
+use super::*;
+
+#[test]
+fn test_mmr_native() {
+    let mut mmr = MMRNative::new(InMemoryStore::default());
+    let mut nodes = vec![];
+
+    
for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + nodes.push(node.clone()); + + mmr.append(node).unwrap(); + + for j in 0..=i { + let proof_node = nodes[j as usize].clone(); + let (node, mmr_proof) = mmr.generate_proof(proof_node.wtxid).ok().flatten().unwrap(); + assert!(mmr.verify_proof(node.clone(), &mmr_proof)); + } + } +} + +#[test] +fn test_mmr_native_simple() { + let store = InMemoryStore::default(); + let mut mmr = MMRNative::new(store.clone()); + + let chunk1 = MMRChunk::new([1; 32], vec![10, 20, 30]); + let chunk2 = MMRChunk::new([2; 32], vec![40, 50, 60]); + let chunk3 = MMRChunk::new([3; 32], vec![70, 80, 90]); + + mmr.append(chunk1.clone()).unwrap(); + mmr.append(chunk2.clone()).unwrap(); + mmr.append(chunk3.clone()).unwrap(); + + let proof = mmr.generate_proof([1; 32]).unwrap(); + assert!(proof.is_some()); + let (chunk, mmr_proof) = proof.unwrap(); + assert_eq!(chunk, chunk1); + assert!(mmr.verify_proof(chunk, &mmr_proof)); +} + +#[test] +fn test_native_proof_with_guest_verification() { + let mut mmr_native = MMRNative::new(InMemoryStore::default()); + let mut mmr_guest = MMRGuest::new(); + + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + + // Append to both Native and Guest + mmr_native.append(node.clone()).unwrap(); + mmr_guest.append(node.clone()); + + // Generate proof in Native and verify in Guest + for j in 0..=i { + let proof_node = MMRChunk::new([j as u8; 32], vec![j as u8; 8]); + let (_, mmr_proof) = mmr_native + .generate_proof(proof_node.wtxid) + .ok() + .flatten() + .unwrap(); + + // Verify proof using Guest + assert!(mmr_guest.verify_proof(&proof_node, &mmr_proof)); + } + } +} + +#[test] +fn test_consistency_between_native_and_guest() { + let mut mmr_native = MMRNative::new(InMemoryStore::default()); + let mut mmr_guest = MMRGuest::new(); + + for i in 0..10 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + + mmr_native.append(node.clone()).unwrap(); + mmr_guest.append(node.clone()); + } + + // Check subroots consistency + let native_subroots = mmr_native.get_subroots(); + assert_eq!(native_subroots, mmr_guest.subroots); +} + +#[test] +fn test_large_dataset_verification() { + let mut mmr_native = MMRNative::new(InMemoryStore::default()); + let mut mmr_guest = MMRGuest::new(); + let mut nodes = vec![]; + + for i in 0..100 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 16]; + let node = MMRChunk::new(wtxid, body); + nodes.push(node.clone()); + + mmr_native.append(node.clone()).unwrap(); + mmr_guest.append(node.clone()); + } + + for node in nodes { + let (_, mmr_proof) = mmr_native + .generate_proof(node.wtxid) + .ok() + .flatten() + .unwrap(); + assert!(mmr_guest.verify_proof(&node, &mmr_proof)); + } +} + +#[test] +fn test_mmr_with_store() { + let store = InMemoryStore::default(); + let mut mmr = MMRNative::new(store); + + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + mmr.append(node).unwrap(); + } + + let mut mmr = MMRNative::new(mmr.store.clone()); + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + let (_, proof) = mmr.generate_proof(wtxid).ok().flatten().unwrap(); + assert!(mmr.verify_proof(node, &proof)); + } +} + +#[test] +fn test_fool_mmr_verify() { + let mut mmr_guest = MMRGuest::new(); + let mut mmr_native = 
MMRNative::new(InMemoryStore::default()); + let mut nodes = vec![]; + + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 80]; + let node = MMRChunk::new(wtxid, body); + nodes.push(node.clone()); + + mmr_native.append(node.clone()).unwrap(); + mmr_guest.append(node); + } + + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 80]; + let node = MMRChunk::new(wtxid, body); + let (chunk_from_tree, proof) = mmr_native.generate_proof(wtxid).unwrap().unwrap(); + + assert_eq!(chunk_from_tree.body, node.body); + assert_eq!(chunk_from_tree.wtxid, node.wtxid); + + assert!(mmr_guest.verify_proof(&node, &proof)); + } + + let (chunk_native, proof) = mmr_native.generate_proof([5; 32]).unwrap().unwrap(); + + let chunk_wrong_wtxid = MMRChunk::new([6; 32], chunk_native.body); + let chunk_wrong_body = MMRChunk::new(chunk_native.wtxid, vec![6; 80]); + + assert!(!mmr_guest.verify_proof(&chunk_wrong_wtxid, &proof)); + assert!(!mmr_guest.verify_proof(&chunk_wrong_body, &proof)); +} diff --git a/crates/sovereign-sdk/rollup-interface/src/node/rpc/mod.rs b/crates/sovereign-sdk/rollup-interface/src/node/rpc/mod.rs index 6cf80301e..87c3c39ab 100644 --- a/crates/sovereign-sdk/rollup-interface/src/node/rpc/mod.rs +++ b/crates/sovereign-sdk/rollup-interface/src/node/rpc/mod.rs @@ -11,6 +11,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; use crate::da::SequencerCommitment; +use crate::mmr::MMRGuest; use crate::soft_confirmation::SignedSoftConfirmation; use crate::zk::{BatchProofInfo, CumulativeStateDiff}; @@ -175,6 +176,10 @@ pub struct LightClientProofOutputRpcResponse { pub last_l2_height: u64, /// L2 activation height of the fork and the Method ids of the batch proofs that were verified in the light client proof pub batch_proof_method_ids: Vec<(u64, [u32; 8])>, + /// A map from tx hash to chunk data. + /// MMRGuest is an impl. MMR, which only needs to hold considerably small amount of data. + /// like 32 hashes and some u64 + pub mmr_guest: MMRGuest, } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs index 002c67b3c..125d391b9 100644 --- a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs @@ -107,6 +107,9 @@ pub trait DaService: Send + Sync + 'static { ::CompletenessProof, ); + /// Decompress and deserialize the chunks into a single complete proof. + fn decompress_chunks(&self, complete_chunks: &[u8]) -> Result, Self::Error>; + /// Send a transaction directly to the DA layer. /// blob is the serialized and signed transaction. /// Returns nothing if the transaction was successfully sent. 
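The new `DaService::decompress_chunks` hook is the inverse of chunking on the read path: the caller hands in the chunk bodies, presumably already assembled in aggregate order, and expects the bytes of the complete serialized proof back. As a hedged illustration only (the hypothetical `UncompressedDa` type below is not part of this PR, and the real Bitcoin DA service presumably applies actual decompression), a DA layer that wrote chunks uncompressed could satisfy the same contract like this:

```rust
use anyhow::Result;

/// Hypothetical DA service used only to illustrate the `decompress_chunks` contract.
struct UncompressedDa;

impl UncompressedDa {
    /// Same shape as the trait method: concatenated chunk bodies in, complete proof blob out.
    fn decompress_chunks(&self, complete_chunks: &[u8]) -> Result<Vec<u8>> {
        // With no compression applied on the write path, decompression is the identity.
        Ok(complete_chunks.to_vec())
    }
}
```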
diff --git a/crates/sovereign-sdk/rollup-interface/src/spec.rs b/crates/sovereign-sdk/rollup-interface/src/spec.rs index bf303ed88..a5c1e6da7 100644 --- a/crates/sovereign-sdk/rollup-interface/src/spec.rs +++ b/crates/sovereign-sdk/rollup-interface/src/spec.rs @@ -19,6 +19,7 @@ use serde::{Deserialize, Serialize}; Deserialize, Hash, )] +#[repr(u8)] #[borsh(use_discriminant = true)] pub enum SpecId { /// Genesis spec diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs index 65385f7ca..8bcc45e86 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs @@ -60,7 +60,7 @@ pub enum DaDataLightClient { /// A zk proof and state diff Complete(Proof), /// A list of tx ids - Aggregate(Vec<[u8; 32]>), + Aggregate(Vec<[u8; 32]>, Vec<[u8; 32]>), /// A chunk of an aggregate Chunk(Vec), /// A new batch proof method_id @@ -161,7 +161,7 @@ pub trait DaSpec: } /// Latest da state to verify and apply da block changes -#[derive(Debug, Clone, BorshDeserialize, BorshSerialize, PartialEq)] +#[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize, PartialEq)] pub struct LatestDaState { /// Proved DA block's header hash /// This is used to compare the previous DA block hash with first batch proof's DA block hash @@ -212,6 +212,9 @@ pub trait DaVerifier: Send + Sync { block_header: &::BlockHeader, network: Network, ) -> Result; + + /// Decompress chunks to complete + fn decompress_chunks(&self, complete_chunks: &[u8]) -> Result, Self::Error>; } #[cfg(feature = "std")] @@ -294,6 +297,9 @@ pub trait BlobReaderTrait: /// Returns the hash of the blob as it appears on the DA layer fn hash(&self) -> [u8; 32]; + /// Returns the witness transaction ID of the blob as it appears on the DA layer + fn wtxid(&self) -> Option<[u8; 32]>; + /// Returns a slice containing all the data accessible to the rollup at this point in time. /// When running in native mode, the rollup can extend this slice by calling `advance`. In zk-mode, /// the rollup is limited to only the verified data. @@ -324,6 +330,9 @@ pub trait BlobReaderTrait: fn full_data(&mut self) -> &[u8] { self.advance(self.total_len()) } + + /// Weird method to serialize blob as v1. Should be removed when a better way is introduced in the future. + fn serialize_v1(&self) -> borsh::io::Result>; } /// Trait with collection of trait bounds for a block hash. diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs index a5c28d210..9f33b5a4b 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs @@ -18,8 +18,10 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; +use super::da::BlobReaderTrait; use super::soft_confirmation::SignedSoftConfirmationV1; use crate::da::{DaSpec, LatestDaState}; +use crate::mmr::{MMRChunk, MMRGuest, MMRInclusionProof}; use crate::soft_confirmation::SignedSoftConfirmation; use crate::spec::SpecId; @@ -296,7 +298,14 @@ impl, + /// A map from tx hash to chunk data. + /// MMRGuest is an impl. MMR, which only needs to hold considerably small amount of data. 
+    /// (on the order of 32 hashes and a few u64 values).
+    pub mmr_guest: MMRGuest,
 }
 
 /// The input of light client proof
@@ -425,13 +438,13 @@ pub struct LightClientCircuitInput {
     pub completeness_proof: Da::CompletenessProof,
     /// DA block header that the batch proofs were found in.
     pub da_block_header: Da::BlockHeader,
-
     /// Light client proof method id
     pub light_client_proof_method_id: [u32; 8],
     /// Light client proof output
     /// Optional because the first light client proof doesn't have a previous proof
    pub previous_light_client_proof_journal: Option<Vec<u8>>,
-
+    /// Hints for the guest MMR tree.
+    pub mmr_hints: VecDeque<(MMRChunk, MMRInclusionProof)>,
     /// Hint for which proofs are expected to fail
     ///
     /// Note: Indices are u32 even though we don't expect that many proofs
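For the `mmr_hints` field added to `LightClientCircuitInput`, one plausible consumption pattern (a sketch under assumptions, not the actual circuit logic in this PR) is for the guest to verify each hinted chunk against the `MMRGuest` accumulator carried over from the previous proof before trusting its body:

```rust
use std::collections::VecDeque;

use sov_rollup_interface::mmr::{MMRChunk, MMRGuest, MMRInclusionProof};

/// Illustrative only: walk the hints, reject anything not committed to the accumulator,
/// and return the body of the chunk with the requested wtxid if it is present.
fn apply_mmr_hints(
    mmr_guest: &MMRGuest,
    mut mmr_hints: VecDeque<(MMRChunk, MMRInclusionProof)>,
    needed_wtxid: [u8; 32],
) -> Option<Vec<u8>> {
    while let Some((chunk, proof)) = mmr_hints.pop_front() {
        if !mmr_guest.verify_proof(&chunk, &proof) {
            return None;
        }
        if chunk.wtxid == needed_wtxid {
            return Some(chunk.body);
        }
    }
    None
}
```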
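The `ForkCodec` derive added in `sov-modules-macros` earlier in this diff pairs each enum variant, in declaration order, with a `SpecId`: encoding is untagged borsh of the inner value, and decoding picks the variant from the active spec, falling back to the newest variant for later specs. A hedged usage sketch, assuming the derive is imported directly from `sov_modules_macros` and that the enum's variants each wrap one borsh type:

```rust
use borsh::{BorshDeserialize, BorshSerialize};
use sov_modules_macros::ForkCodec;
use sov_rollup_interface::fork::ForkCodec as _; // bring the trait methods into scope
use sov_rollup_interface::spec::SpecId;

#[derive(BorshSerialize, BorshDeserialize)]
struct ProverInputV1 { /* pre-fork fields */ }

#[derive(BorshSerialize, BorshDeserialize)]
struct ProverInputV2 { /* post-fork fields */ }

// Variant 0 decodes for SpecId::Genesis, variant 1 for SpecId::Fork1 and beyond.
#[derive(ForkCodec)]
enum ProverInput {
    Genesis(ProverInputV1),
    Fork1(ProverInputV2),
}

fn roundtrip(input: &ProverInput) -> anyhow::Result<ProverInput> {
    let bytes = input.encode()?;
    ProverInput::decode(bytes, SpecId::Fork1)
}
```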